1/*
2 **************************************************************************
3 * Copyright (c) 2014-2015, The Linux Foundation.  All rights reserved.
4 * Permission to use, copy, modify, and/or distribute this software for
5 * any purpose with or without fee is hereby granted, provided that the
6 * above copyright notice and this permission notice appear in all copies.
7 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
8 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
9 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
10 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
11 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
12 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
13 * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
14 **************************************************************************
15 */
16
17#include <linux/version.h>
18#include <linux/types.h>
19#include <linux/ip.h>
20#include <linux/tcp.h>
21#include <linux/module.h>
22#include <linux/skbuff.h>
23#include <linux/icmp.h>
24#include <linux/debugfs.h>
25#include <linux/kthread.h>
26#include <linux/pkt_sched.h>
27#include <linux/string.h>
28#include <linux/ctype.h>
29#include <net/route.h>
30#include <net/ip.h>
31#include <net/tcp.h>
32#include <asm/unaligned.h>
33#include <asm/uaccess.h>	/* for put_user */
34#include <net/ipv6.h>
35#include <linux/inet.h>
36#include <linux/in.h>
37#include <linux/udp.h>
38#include <linux/tcp.h>
39
40#include <linux/netfilter_ipv4.h>
41#include <linux/netfilter_bridge.h>
42#include <net/netfilter/nf_conntrack.h>
43#include <net/netfilter/nf_conntrack_helper.h>
44#include <net/netfilter/nf_conntrack_l4proto.h>
45#include <net/netfilter/nf_conntrack_l3proto.h>
46#include <net/netfilter/nf_conntrack_core.h>
47#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
48#include <net/netfilter/ipv4/nf_defrag_ipv4.h>
49
50/*
51 * Debug output levels
52 * 0 = OFF
53 * 1 = ASSERTS / ERRORS
54 * 2 = 1 + WARN
55 * 3 = 2 + INFO
56 * 4 = 3 + TRACE
57 */
58#define DEBUG_LEVEL ECM_CLASSIFIER_DEFAULT_DEBUG_LEVEL
59
60#include "ecm_types.h"
61#include "ecm_db_types.h"
62#include "ecm_state.h"
63#include "ecm_tracker.h"
64#include "ecm_classifier.h"
65#include "ecm_front_end_types.h"
66#include "ecm_tracker_datagram.h"
67#include "ecm_tracker_udp.h"
68#include "ecm_tracker_tcp.h"
69#include "ecm_db.h"
70#include "ecm_classifier_default.h"
71
72/*
73 * Magic numbers
74 */
75#define ECM_CLASSIFIER_DEFAULT_INTERNAL_INSTANCE_MAGIC 0x8761
76#define ECM_CLASSIFIER_DEFAULT_STATE_FILE_INSTANCE_MAGIC 0x3321
77
78/*
79 * struct ecm_classifier_default_internal_instance
80 * 	State to allow tracking of dynamic priority for a connection
81 */
82struct ecm_classifier_default_internal_instance {
83	struct ecm_classifier_default_instance base;		/* Base type */
84
85	uint32_t ci_serial;					/* RO: Serial of the connection */
86	int protocol;						/* RO: Protocol of the connection */
87
88	struct ecm_classifier_process_response process_response;
89								/* Last process response computed */
90
91	ecm_db_timer_group_t timer_group;			/* The timer group the connection should be in based on state */
92
93	ecm_tracker_sender_type_t ingress_sender;		/* RO: Which sender is sending ingress data */
94	ecm_tracker_sender_type_t egress_sender;		/* RO: Which sender is sending egress data */
95
96	struct ecm_tracker_instance *ti;			/* RO: Tracker used for state and timer group checking. Pointer will not change so safe to access outside of lock. */
97
98	int refs;						/* Integer to trap we never go negative */
99#if (DEBUG_LEVEL > 0)
100	uint16_t magic;
101#endif
102};
103
static  DEFINE_SPINLOCK(ecm_classifier_default_lock);			/* Concurrency control SMP access */
static int ecm_classifier_default_count = 0;			/* Tracks number of instances allocated; guarded by ecm_classifier_default_lock */

/*
 * Operational control
 */
static ecm_classifier_acceleration_mode_t ecm_classifier_default_accel_mode = ECM_CLASSIFIER_ACCELERATION_MODE_ACCEL;
								/* Cause connections whose hosts are both on-link to be accelerated */
static bool ecm_classifier_default_enabled = true;		/* When disabled the qos algorithm will not be applied to skb's */
								/* NOTE(review): exposed to debugfs via a (u32 *) cast in ecm_classifier_default_init() - see the note there */

/*
 * Management thread control
 */
static bool ecm_classifier_default_terminate_pending = false;	/* True when the user wants us to terminate; guarded by ecm_classifier_default_lock */
118
119/*
120 * Character device stuff - used to communicate status back to user space
121 */
122#define ECM_CLASSIFIER_DEFAULT_STATE_FILE_BUFFER_SIZE 1024
123struct ecm_classifier_default_state_file_instance {
124	struct ecm_classifier_default_internal_instance *cdii;
125	bool doc_start_written;
126	bool doc_end_written;
127	char msg_buffer[ECM_CLASSIFIER_DEFAULT_STATE_FILE_BUFFER_SIZE];	/* Used to hold the current state message being output */
128	char *msgp;							/* Points into the msg buffer as we output it piece by piece */
129	int msg_len;							/* Length of the buffer still to be written out */
130#if (DEBUG_LEVEL > 0)
131	uint16_t magic;
132#endif
133};
134static struct dentry *ecm_classifier_default_dentry;		/* Debugfs dentry object */
135
136/*
137 * _ecm_classifier_default_ref()
138 *	Ref
139 */
140static void _ecm_classifier_default_ref(struct ecm_classifier_default_internal_instance *cdii)
141{
142	cdii->refs++;
143	DEBUG_TRACE("%p: cdii ref %d\n", cdii, cdii->refs);
144	DEBUG_ASSERT(cdii->refs > 0, "%p: ref wrap\n", cdii);
145}
146
147/*
148 * ecm_classifier_default_ref()
149 *	Ref
150 */
151static void ecm_classifier_default_ref(struct ecm_classifier_instance *ci)
152{
153	struct ecm_classifier_default_internal_instance *cdii;
154	cdii = (struct ecm_classifier_default_internal_instance *)ci;
155
156	DEBUG_CHECK_MAGIC(cdii, ECM_CLASSIFIER_DEFAULT_INTERNAL_INSTANCE_MAGIC, "%p: magic failed", cdii);
157	spin_lock_bh(&ecm_classifier_default_lock);
158	_ecm_classifier_default_ref(cdii);
159	spin_unlock_bh(&ecm_classifier_default_lock);
160}
161
162/*
163 * ecm_classifier_default_deref()
164 *	Deref
165 */
166static int ecm_classifier_default_deref(struct ecm_classifier_instance *ci)
167{
168	struct ecm_classifier_default_internal_instance *cdii;
169	cdii = (struct ecm_classifier_default_internal_instance *)ci;
170
171	DEBUG_CHECK_MAGIC(cdii, ECM_CLASSIFIER_DEFAULT_INTERNAL_INSTANCE_MAGIC, "%p: magic failed", cdii);
172	spin_lock_bh(&ecm_classifier_default_lock);
173	cdii->refs--;
174	DEBUG_ASSERT(cdii->refs >= 0, "%p: refs wrapped\n", cdii);
175	DEBUG_TRACE("%p: Default classifier deref %d\n", cdii, cdii->refs);
176	if (cdii->refs) {
177		int refs = cdii->refs;
178		spin_unlock_bh(&ecm_classifier_default_lock);
179		return refs;
180	}
181
182	/*
183	 * Object to be destroyed
184	 */
185	ecm_classifier_default_count--;
186	DEBUG_ASSERT(ecm_classifier_default_count >= 0, "%p: ecm_classifier_default_count wrap\n", cdii);
187
188	spin_unlock_bh(&ecm_classifier_default_lock);
189
190	/*
191	 * Release our tracker
192	 */
193	cdii->ti->deref(cdii->ti);
194
195	/*
196	 * Final
197	 */
198	DEBUG_INFO("%p: Final default classifier instance\n", cdii);
199	kfree(cdii);
200
201	return 0;
202}
203
204/*
205 * ecm_classifier_default_process_callback()
206 *	Process new data updating the priority
207 *
208 * NOTE: This function would only ever be called if all other classifiers have failed.
209 */
210static void ecm_classifier_default_process(struct ecm_classifier_instance *aci, ecm_tracker_sender_type_t sender,
211									struct ecm_tracker_ip_header *ip_hdr, struct sk_buff *skb,
212									struct ecm_classifier_process_response *process_response)
213{
214	struct ecm_tracker_instance *ti;
215	ecm_tracker_sender_state_t from_state;
216	ecm_tracker_sender_state_t to_state;
217	ecm_tracker_connection_state_t prevailing_state;
218	ecm_db_timer_group_t tg;
219	struct ecm_classifier_default_internal_instance *cdii = (struct ecm_classifier_default_internal_instance *)aci;
220	struct nf_conn *ct;
221	enum ip_conntrack_info ctinfo;
222	DEBUG_CHECK_MAGIC(cdii, ECM_CLASSIFIER_DEFAULT_INTERNAL_INSTANCE_MAGIC, "%p: invalid state magic\n", cdii);
223
224
225	spin_lock_bh(&ecm_classifier_default_lock);
226
227	/*
228	 * Get qos result and accel mode
229	 * Default classifier is rarely disabled.
230	 */
231	if (unlikely(!ecm_classifier_default_enabled)) {
232		/*
233		 * Still relevant but have no actions that need processing
234		 */
235		cdii->process_response.process_actions = 0;
236		*process_response = cdii->process_response;
237		spin_unlock_bh(&ecm_classifier_default_lock);
238		return;
239	}
240
241	/*
242	 * Accel?
243	 */
244	if (ecm_classifier_default_accel_mode != ECM_CLASSIFIER_ACCELERATION_MODE_DONT_CARE) {
245		cdii->process_response.accel_mode = ecm_classifier_default_accel_mode;
246		cdii->process_response.process_actions |= ECM_CLASSIFIER_PROCESS_ACTION_ACCEL_MODE;
247	} else {
248		cdii->process_response.process_actions &= ~ECM_CLASSIFIER_PROCESS_ACTION_ACCEL_MODE;
249	}
250	spin_unlock_bh(&ecm_classifier_default_lock);
251
252	/*
253	 * Update connection state
254	 * Compute the timer group this connection should be in.
255	 * For this we need the tracker and the state to be updated.
256	 * NOTE: Tracker does not need to be ref'd it will exist for as long as this default classifier instance does
257	 * which is at least for the duration of this call.
258	 */
259	ti = cdii->ti;
260	ti->state_update(ti, sender, ip_hdr, skb);
261	ti->state_get(ti, &from_state, &to_state, &prevailing_state, &tg);
262	spin_lock_bh(&ecm_classifier_default_lock);
263	if (unlikely(cdii->timer_group != tg)) {
264		/*
265		 * Timer group has changed
266		 */
267		cdii->process_response.process_actions |= ECM_CLASSIFIER_PROCESS_ACTION_TIMER_GROUP;
268		cdii->process_response.timer_group = tg;
269
270		/*
271		 * Record for future change comparisons
272		 */
273		cdii->timer_group = tg;
274	}
275	spin_unlock_bh(&ecm_classifier_default_lock);
276
277	/*
278	 * Handle non-TCP case
279	 */
280	if (cdii->protocol != IPPROTO_TCP) {
281		if (unlikely(prevailing_state != ECM_TRACKER_CONNECTION_STATE_ESTABLISHED)) {
282			cdii->process_response.accel_mode = ECM_CLASSIFIER_ACCELERATION_MODE_NO;
283		}
284		goto return_response;
285	}
286
287	/*
288	 * Check the TCP connection state.
289	 * If we are not established then we deny acceleration.
290	 * Take lead from conntrack if exists.
291	 */
292	ct = nf_ct_get(skb, &ctinfo);
293	if (ct == NULL) {
294		DEBUG_TRACE("%p: No Conntrack found for packet, using ECM tracker state\n", cdii);
295		if (unlikely(prevailing_state != ECM_TRACKER_CONNECTION_STATE_ESTABLISHED)) {
296			cdii->process_response.accel_mode = ECM_CLASSIFIER_ACCELERATION_MODE_NO;
297			goto return_response;
298		}
299	} else {
300		/*
301		 * Don't try to manage a non-established connection.
302		 */
303		if (!test_bit(IPS_ASSURED_BIT, &ct->status)) {
304			DEBUG_TRACE("%p: Non-established connection\n", ct);
305			cdii->process_response.accel_mode = ECM_CLASSIFIER_ACCELERATION_MODE_NO;
306			goto return_response;
307		}
308
309		/*
310		 * If the connection is shutting down do not manage it.
311		 * state can not be SYN_SENT, SYN_RECV because connection is assured
312		 * Not managed states: FIN_WAIT, CLOSE_WAIT, LAST_ACK, TIME_WAIT, CLOSE.
313		 */
314		spin_lock_bh(&ct->lock);
315		if (ct->proto.tcp.state != TCP_CONNTRACK_ESTABLISHED) {
316			spin_unlock_bh(&ct->lock);
317			DEBUG_TRACE("%p: Connection in termination state %#X\n", ct, ct->proto.tcp.state);
318			cdii->process_response.accel_mode = ECM_CLASSIFIER_ACCELERATION_MODE_NO;
319			goto return_response;
320		}
321		spin_unlock_bh(&ct->lock);
322	}
323
324return_response:
325	;
326
327	/*
328	 * Return the process response
329	 */
330	spin_lock_bh(&ecm_classifier_default_lock);
331	*process_response = cdii->process_response;
332	spin_unlock_bh(&ecm_classifier_default_lock);
333}
334
335/*
336 * ecm_classifier_default_type_get()
337 *	Get type of classifier this is
338 */
339static ecm_classifier_type_t ecm_classifier_default_type_get(struct ecm_classifier_instance *aci)
340{
341	struct ecm_classifier_default_internal_instance *cdii;
342	cdii = (struct ecm_classifier_default_internal_instance *)aci;
343
344	DEBUG_CHECK_MAGIC(cdii, ECM_CLASSIFIER_DEFAULT_INTERNAL_INSTANCE_MAGIC, "%p: magic failed", cdii);
345	return ECM_CLASSIFIER_TYPE_DEFAULT;
346}
347
348/*
349 * ecm_classifier_default_reclassify_allowed()
350 *	Get whether reclassification is allowed
351 */
352static bool ecm_classifier_default_reclassify_allowed(struct ecm_classifier_instance *aci)
353{
354	struct ecm_classifier_default_internal_instance *cdii;
355	cdii = (struct ecm_classifier_default_internal_instance *)aci;
356
357	DEBUG_CHECK_MAGIC(cdii, ECM_CLASSIFIER_DEFAULT_INTERNAL_INSTANCE_MAGIC, "%p: magic failed", cdii);
358	return true;
359}
360
361/*
362 * ecm_classifier_default_reclassify()
363 *	Reclassify
364 */
365static void ecm_classifier_default_reclassify(struct ecm_classifier_instance *aci)
366{
367	struct ecm_classifier_default_internal_instance *cdii;
368	cdii = (struct ecm_classifier_default_internal_instance *)aci;
369	DEBUG_CHECK_MAGIC(cdii, ECM_CLASSIFIER_DEFAULT_INTERNAL_INSTANCE_MAGIC, "%p: magic failed", cdii);
370}
371
372/*
373 * ecm_classifier_default_last_process_response_get()
374 *	Get result code returned by the last process call
375 */
376static void ecm_classifier_default_last_process_response_get(struct ecm_classifier_instance *aci,
377							struct ecm_classifier_process_response *process_response)
378{
379	struct ecm_classifier_default_internal_instance *cdii;
380	cdii = (struct ecm_classifier_default_internal_instance *)aci;
381	DEBUG_CHECK_MAGIC(cdii, ECM_CLASSIFIER_DEFAULT_INTERNAL_INSTANCE_MAGIC, "%p: magic failed", cdii);
382
383	spin_lock_bh(&ecm_classifier_default_lock);
384	*process_response = cdii->process_response;
385	spin_unlock_bh(&ecm_classifier_default_lock);
386}
387
388/*
389 * ecm_classifier_default_sync_to_v4()
390 *	Front end is pushing accel engine state to us
391 */
392static void ecm_classifier_default_sync_to_v4(struct ecm_classifier_instance *aci, struct ecm_classifier_rule_sync *sync)
393{
394	struct ecm_classifier_default_internal_instance *cdii __attribute__((unused));
395
396	cdii = (struct ecm_classifier_default_internal_instance *)aci;
397	DEBUG_CHECK_MAGIC(cdii, ECM_CLASSIFIER_DEFAULT_INTERNAL_INSTANCE_MAGIC, "%p: magic failed", cdii);
398}
399
400/*
401 * ecm_classifier_default_sync_from_v4()
402 *	Front end is retrieving accel engine state from us
403 */
404static void ecm_classifier_default_sync_from_v4(struct ecm_classifier_instance *aci, struct ecm_classifier_rule_create *ecrc)
405{
406	struct ecm_classifier_default_internal_instance *cdii __attribute__((unused));
407
408	cdii = (struct ecm_classifier_default_internal_instance *)aci;
409	DEBUG_CHECK_MAGIC(cdii, ECM_CLASSIFIER_DEFAULT_INTERNAL_INSTANCE_MAGIC, "%p: magic failed", cdii);
410}
411
412/*
413 * ecm_classifier_default_sync_to_v6()
414 *	Front end is pushing accel engine state to us
415 */
416static void ecm_classifier_default_sync_to_v6(struct ecm_classifier_instance *aci, struct ecm_classifier_rule_sync *sync)
417{
418	struct ecm_classifier_default_internal_instance *cdii __attribute__((unused));
419
420	cdii = (struct ecm_classifier_default_internal_instance *)aci;
421	DEBUG_CHECK_MAGIC(cdii, ECM_CLASSIFIER_DEFAULT_INTERNAL_INSTANCE_MAGIC, "%p: magic failed", cdii);
422}
423
424/*
425 * ecm_classifier_default_sync_from_v6()
426 *	Front end is retrieving accel engine state from us
427 */
428static void ecm_classifier_default_sync_from_v6(struct ecm_classifier_instance *aci, struct ecm_classifier_rule_create *ecrc)
429{
430	struct ecm_classifier_default_internal_instance *cdii __attribute__((unused));
431
432	cdii = (struct ecm_classifier_default_internal_instance *)aci;
433	DEBUG_CHECK_MAGIC(cdii, ECM_CLASSIFIER_DEFAULT_INTERNAL_INSTANCE_MAGIC, "%p: magic failed", cdii);
434}
435
436/*
437 * ecm_classifier_tracker_get_and_ref()
438 *	Obtain default classifiers tracker (usually for state tracking for the connection as it always exists for the connection)
439 */
440static struct ecm_tracker_instance *ecm_classifier_tracker_get_and_ref(struct ecm_classifier_default_instance *dci)
441{
442	struct ecm_classifier_default_internal_instance *cdii;
443	struct ecm_tracker_instance *ti;
444
445	cdii = (struct ecm_classifier_default_internal_instance *)dci;
446	DEBUG_CHECK_MAGIC(cdii, ECM_CLASSIFIER_DEFAULT_INTERNAL_INSTANCE_MAGIC, "%p: magic failed", cdii);
447
448	ti = cdii->ti;
449	ti->ref(ti);
450	return ti;
451}
452
453#ifdef ECM_STATE_OUTPUT_ENABLE
454/*
455 * ecm_classifier_default_state_get()
456 *	Return state
457 */
458static int ecm_classifier_default_state_get(struct ecm_classifier_instance *ci, struct ecm_state_file_instance *sfi)
459{
460	int result;
461	struct ecm_classifier_default_internal_instance *cdii;
462	struct ecm_classifier_process_response process_response;
463	ecm_db_timer_group_t timer_group;
464	ecm_tracker_sender_type_t ingress_sender;
465	ecm_tracker_sender_type_t egress_sender;
466
467	cdii = (struct ecm_classifier_default_internal_instance *)ci;
468	DEBUG_CHECK_MAGIC(cdii, ECM_CLASSIFIER_DEFAULT_INTERNAL_INSTANCE_MAGIC, "%p: magic failed", cdii);
469
470	if ((result = ecm_state_prefix_add(sfi, "default"))) {
471		return result;
472	}
473
474	spin_lock_bh(&ecm_classifier_default_lock);
475	egress_sender = cdii->egress_sender;
476	ingress_sender = cdii->ingress_sender;
477	timer_group = cdii->timer_group;
478	process_response = cdii->process_response;
479	spin_unlock_bh(&ecm_classifier_default_lock);
480
481	if ((result = ecm_state_write(sfi, "ingress_sender", "%d", ingress_sender))) {
482		return result;
483	}
484	if ((result = ecm_state_write(sfi, "egress_sender", "%d", egress_sender))) {
485		return result;
486	}
487	if ((result = ecm_state_write(sfi, "timer_group", "%d", timer_group))) {
488		return result;
489	}
490
491	/*
492	 * Output our last process response
493	 */
494	if ((result = ecm_classifier_process_response_state_get(sfi, &process_response))) {
495		return result;
496	}
497
498	if ((result = ecm_state_prefix_add(sfi, "trackers"))) {
499		return result;
500	}
501
502	/*
503	 * Output our tracker state
504	 */
505	if ((result = cdii->ti->state_text_get(cdii->ti, sfi))) {
506		return result;
507	}
508
509	if ((result = ecm_state_prefix_remove(sfi))) {
510		return result;
511	}
512
513	return ecm_state_prefix_remove(sfi);
514}
515#endif
516
517/*
518 * ecm_classifier_default_instance_alloc()
519 *	Allocate an instance of the default classifier
520 */
521struct ecm_classifier_default_instance *ecm_classifier_default_instance_alloc(struct ecm_db_connection_instance *ci, int protocol, ecm_db_direction_t dir, int from_port, int to_port)
522{
523	struct ecm_classifier_default_internal_instance *cdii;
524	struct ecm_classifier_default_instance *cdi;
525
526	/*
527	 * Allocate the instance
528	 */
529	cdii = (struct ecm_classifier_default_internal_instance *)kzalloc(sizeof(struct ecm_classifier_default_internal_instance), GFP_ATOMIC | __GFP_NOWARN);
530	if (!cdii) {
531		DEBUG_WARN("Failed to allocate default instance\n");
532		return NULL;
533	}
534
535	/*
536	 * Allocate a tracker for state etc.
537	 */
538	if (protocol == IPPROTO_TCP) {
539		DEBUG_TRACE("%p: Alloc tracker for TCP connection: %p\n", cdii, ci);
540		cdii->ti = (struct ecm_tracker_instance *)ecm_tracker_tcp_alloc();
541		if (!cdii->ti) {
542			DEBUG_WARN("%p: Failed to alloc tracker\n", cdii);
543			kfree(cdii);
544			return NULL;
545		}
546		ecm_tracker_tcp_init((struct ecm_tracker_tcp_instance *)cdii->ti, ECM_TRACKER_CONNECTION_TRACKING_LIMIT_DEFAULT, 1500, 1500);
547	} else if (protocol == IPPROTO_UDP) {
548		DEBUG_TRACE("%p: Alloc tracker for UDP connection: %p\n", cdii, ci);
549		cdii->ti = (struct ecm_tracker_instance *)ecm_tracker_udp_alloc();
550		if (!cdii->ti) {
551			DEBUG_WARN("%p: Failed to alloc tracker\n", cdii);
552			kfree(cdii);
553			return NULL;
554		}
555		ecm_tracker_udp_init((struct ecm_tracker_udp_instance *)cdii->ti, ECM_TRACKER_CONNECTION_TRACKING_LIMIT_DEFAULT, from_port, to_port);
556	} else {
557		DEBUG_TRACE("%p: Alloc tracker for non-ported connection: %p\n", cdii, ci);
558		cdii->ti = (struct ecm_tracker_instance *)ecm_tracker_datagram_alloc();
559		if (!cdii->ti) {
560			DEBUG_WARN("%p: Failed to alloc tracker\n", cdii);
561			kfree(cdii);
562			return NULL;
563		}
564		ecm_tracker_datagram_init((struct ecm_tracker_datagram_instance *)cdii->ti, ECM_TRACKER_CONNECTION_TRACKING_LIMIT_DEFAULT);
565	}
566
567	DEBUG_SET_MAGIC(cdii, ECM_CLASSIFIER_DEFAULT_INTERNAL_INSTANCE_MAGIC);
568	cdii->refs = 1;
569	cdii->ci_serial = ecm_db_connection_serial_get(ci);
570	cdii->protocol = protocol;
571
572	/*
573	 * We are always relevant to the connection
574	 */
575	cdii->process_response.relevance = ECM_CLASSIFIER_RELEVANCE_YES;
576
577	/*
578	 * Using the connection direction identify egress and ingress host addresses
579	 */
580	if (dir == ECM_DB_DIRECTION_INGRESS_NAT) {
581		cdii->ingress_sender = ECM_TRACKER_SENDER_TYPE_SRC;
582		cdii->egress_sender = ECM_TRACKER_SENDER_TYPE_DEST;
583	} else {
584		cdii->egress_sender = ECM_TRACKER_SENDER_TYPE_SRC;
585		cdii->ingress_sender = ECM_TRACKER_SENDER_TYPE_DEST;
586	}
587	DEBUG_TRACE("%p: Ingress sender = %d egress sender = %d\n", cdii, cdii->ingress_sender, cdii->egress_sender);
588
589	/*
590	 * Methods specific to the default classifier
591	 */
592	cdi = (struct ecm_classifier_default_instance *)cdii;
593	cdi->tracker_get_and_ref = ecm_classifier_tracker_get_and_ref;
594
595	/*
596	 * Methods generic to all classifiers.
597	 */
598	cdi->base.process = ecm_classifier_default_process;
599	cdi->base.sync_from_v4 = ecm_classifier_default_sync_from_v4;
600	cdi->base.sync_to_v4 = ecm_classifier_default_sync_to_v4;
601	cdi->base.sync_from_v6 = ecm_classifier_default_sync_from_v6;
602	cdi->base.sync_to_v6 = ecm_classifier_default_sync_to_v6;
603	cdi->base.type_get = ecm_classifier_default_type_get;
604	cdi->base.reclassify_allowed = ecm_classifier_default_reclassify_allowed;
605	cdi->base.reclassify = ecm_classifier_default_reclassify;
606	cdi->base.last_process_response_get = ecm_classifier_default_last_process_response_get;
607#ifdef ECM_STATE_OUTPUT_ENABLE
608	cdi->base.state_get = ecm_classifier_default_state_get;
609#endif
610	cdi->base.ref = ecm_classifier_default_ref;
611	cdi->base.deref = ecm_classifier_default_deref;
612
613	spin_lock_bh(&ecm_classifier_default_lock);
614
615	/*
616	 * Final check if we are pending termination
617	 */
618	if (ecm_classifier_default_terminate_pending) {
619		spin_unlock_bh(&ecm_classifier_default_lock);
620		DEBUG_INFO("%p: Terminating\n", ci);
621		cdii->ti->deref(cdii->ti);
622		kfree(cdii);
623		return NULL;
624	}
625
626	/*
627	 * Increment stats
628	 */
629	ecm_classifier_default_count++;
630	DEBUG_ASSERT(ecm_classifier_default_count > 0, "%p: ecm_classifier_default_count wrap\n", cdii);
631	spin_unlock_bh(&ecm_classifier_default_lock);
632
633	DEBUG_INFO("Default classifier instance alloc: %p\n", cdii);
634	return cdi;
635}
636EXPORT_SYMBOL(ecm_classifier_default_instance_alloc);
637
638/*
639 * ecm_classifier_default_init()
640 */
641int ecm_classifier_default_init(struct dentry *dentry)
642{
643	DEBUG_INFO("Default classifier Module init\n");
644
645	DEBUG_ASSERT(ECM_CLASSIFIER_TYPE_DEFAULT == 0, "DO NOT CHANGE DEFAULT PRIORITY");
646
647	ecm_classifier_default_dentry = debugfs_create_dir("ecm_classifier_default", dentry);
648	if (!ecm_classifier_default_dentry) {
649		DEBUG_ERROR("Failed to create ecm default classifier directory in debugfs\n");
650		return -1;
651	}
652
653	if (!debugfs_create_bool("enabled", S_IRUGO | S_IWUSR, ecm_classifier_default_dentry,
654					(u32 *)&ecm_classifier_default_enabled)) {
655		DEBUG_ERROR("Failed to create ecm deafult classifier enabled file in debugfs\n");
656		debugfs_remove_recursive(ecm_classifier_default_dentry);
657		return -1;
658	}
659
660	if (!debugfs_create_u32("accel_mode", S_IRUGO | S_IWUSR, ecm_classifier_default_dentry,
661					(u32 *)&ecm_classifier_default_accel_mode)) {
662		DEBUG_ERROR("Failed to create ecm deafult classifier accel_mode file in debugfs\n");
663		debugfs_remove_recursive(ecm_classifier_default_dentry);
664		return -1;
665	}
666
667	return 0;
668}
669EXPORT_SYMBOL(ecm_classifier_default_init);
670
671/*
672 * ecm_classifier_default_exit()
673 */
674void ecm_classifier_default_exit(void)
675{
676	DEBUG_INFO("Default classifier Module exit\n");
677	spin_lock_bh(&ecm_classifier_default_lock);
678	ecm_classifier_default_terminate_pending = true;
679	spin_unlock_bh(&ecm_classifier_default_lock);
680
681	/*
682	 * Remove the debugfs files recursively.
683	 */
684	if (ecm_classifier_default_dentry) {
685		debugfs_remove_recursive(ecm_classifier_default_dentry);
686	}
687}
688EXPORT_SYMBOL(ecm_classifier_default_exit);
689