1/*
2 **************************************************************************
3 * Copyright (c) 2015 The Linux Foundation.  All rights reserved.
4 * Permission to use, copy, modify, and/or distribute this software for
5 * any purpose with or without fee is hereby granted, provided that the
6 * above copyright notice and this permission notice appear in all copies.
7 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
8 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
9 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
10 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
11 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
12 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
13 * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
14 **************************************************************************
15 */
16
17#include <sfe_drv.h>
18
19extern int ecm_sfe_ipv4_no_action_limit_default;		/* Default no-action limit. */
20extern int ecm_sfe_ipv4_driver_fail_limit_default;		/* Default driver fail limit. */
21extern int ecm_sfe_ipv4_nack_limit_default;			/* Default nack limit. */
22extern int ecm_sfe_ipv4_accelerated_count;			/* Total offloads */
23extern int ecm_sfe_ipv4_pending_accel_count;			/* Total pending offloads issued to the SFE / awaiting completion */
24extern int ecm_sfe_ipv4_pending_decel_count;			/* Total pending deceleration requests issued to the SFE / awaiting completion */
25
26/*
27 * Limiting the acceleration of connections.
28 *
29 * By default there is no acceleration limiting.
30 * This means that when ECM has more connections (that can be accelerated) than the acceleration
31 * engine will allow the ECM will continue to try to accelerate.
32 * In this scenario the acceleration engine will begin removal of existing rules to make way for new ones.
33 * When the accel_limit_mode is set to FIXED ECM will not permit more rules to be issued than the engine will allow.
34 */
35extern uint32_t ecm_sfe_ipv4_accel_limit_mode;
36
37/*
38 * Locking of the classifier - concurrency control for file global parameters.
39 * NOTE: It is safe to take this lock WHILE HOLDING a feci->lock.  The reverse is NOT SAFE.
40 */
41extern spinlock_t ecm_sfe_ipv4_lock;			/* Protect against SMP access between netfilter, events and private threaded function. */
42
43/*
44 * Management thread control
45 */
46extern bool ecm_sfe_ipv4_terminate_pending;		/* True when the user has signalled we should quit */
47
48/*
49 * sfe driver linkage
50 */
51extern struct sfe_drv_ctx_instance *ecm_sfe_ipv4_drv_mgr;
52
53/*
54 * ecm_sfe_ipv4_accel_pending_set()
55 *	Set pending acceleration for the connection object.
56 *
57 * Return false if the acceleration is not permitted or is already in progress.
58 */
static inline bool ecm_sfe_ipv4_accel_pending_set(struct ecm_front_end_connection_instance *feci)
{
	DEBUG_INFO("%p: Accel conn: %p\n", feci, feci->ci);

	/*
	 * If re-generation is required then we cannot permit acceleration.
	 * NOTE: this peek is done lock-free; a regeneration flagged immediately
	 * after the peek is handled by the normal regeneration path later.
	 */
	if (ecm_db_connection_regeneration_required_peek(feci->ci)) {
		DEBUG_TRACE("%p: accel %p failed - regen required\n", feci, feci->ci);
		return false;
	}

	/*
	 * Is connection acceleration permanently failed?
	 * feci->lock is taken here and, on the success path, held until the
	 * mode is updated at the end of this function.
	 */
	spin_lock_bh(&feci->lock);
	if (ECM_FRONT_END_ACCELERATION_FAILED(feci->accel_mode)) {
		spin_unlock_bh(&feci->lock);
		DEBUG_TRACE("%p: accel %p failed\n", feci, feci->ci);
		return false;
	}

	/*
	 * If acceleration mode is anything other than "not accelerated" then ignore.
	 * This covers accelerated, accel-pending and decel-pending states - in all
	 * of those a new accel request must not be started.
	 */
	if (feci->accel_mode != ECM_FRONT_END_ACCELERATION_MODE_DECEL) {
		spin_unlock_bh(&feci->lock);
		DEBUG_TRACE("%p: Ignoring wrong mode accel for conn: %p\n", feci, feci->ci);
		return false;
	}

	/*
	 * Do we have a fixed upper limit for acceleration?
	 * Lock order: ecm_sfe_ipv4_lock is taken while feci->lock is held, which
	 * is the documented-safe ordering (see the comment on ecm_sfe_ipv4_lock).
	 * Both accelerated and in-flight (pending) rules count against the limit.
	 */
	spin_lock_bh(&ecm_sfe_ipv4_lock);
	if (ecm_sfe_ipv4_accel_limit_mode & ECM_FRONT_END_ACCEL_LIMIT_MODE_FIXED) {
		if ((ecm_sfe_ipv4_pending_accel_count + ecm_sfe_ipv4_accelerated_count) >= sfe_drv_ipv4_max_conn_count()) {
			/*
			 * Deny: release in reverse acquisition order.
			 */
			spin_unlock_bh(&ecm_sfe_ipv4_lock);
			spin_unlock_bh(&feci->lock);
			DEBUG_INFO("%p: Accel limit reached, accel denied: %p\n", feci, feci->ci);
			return false;
		}
	}

	/*
	 * Okay to accelerate - account for the new in-flight request while the
	 * global lock is still held.
	 */
	ecm_sfe_ipv4_pending_accel_count++;
	spin_unlock_bh(&ecm_sfe_ipv4_lock);

	/*
	 * Okay connection can be set to pending acceleration.
	 * The matching clear (and counter decrement) happens in
	 * _ecm_sfe_ipv4_accel_pending_clear().
	 */
	feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_ACCEL_PENDING;
	spin_unlock_bh(&feci->lock);
	return true;
}
116
117/*
118 * _ecm_sfe_ipv4_accel_pending_clear()
119 *	Clear pending acceleration for the connection object, setting it to the desired state.
120 *
121 * Returns true if "decelerate was pending".
122 *
123 * The feci->lock AND ecm_sfe_ipv4_lock must be held on entry.
124 */
125static inline bool _ecm_sfe_ipv4_accel_pending_clear(struct ecm_front_end_connection_instance *feci, ecm_front_end_acceleration_mode_t mode)
126{
127	bool decel_pending;
128
129	/*
130	 * Set the mode away from its accel pending state.
131	 */
132	DEBUG_ASSERT(feci->accel_mode == ECM_FRONT_END_ACCELERATION_MODE_ACCEL_PENDING, "%p: Accel mode unexpected: %d\n", feci, feci->accel_mode);
133	feci->accel_mode = mode;
134
135	/*
136	 * Clear decelerate pending flag.
137	 * This flag is only set when we are ACCEL_PENDING -
138	 * and we are moving from that to the given mode anyway.
139	 */
140	decel_pending = feci->stats.decelerate_pending;
141	feci->stats.decelerate_pending = false;
142
143	/*
144	 * Decrement pending counter
145	 */
146	ecm_sfe_ipv4_pending_accel_count--;
147	DEBUG_ASSERT(ecm_sfe_ipv4_pending_accel_count >= 0, "Accel pending underflow\n");
148	return decel_pending;
149}
150
151/*
152 * ecm_sfe_ipv4_accel_pending_clear()
153 *	Clear pending acceleration for the connection object, setting it to the desired state.
154 */
155static inline bool ecm_sfe_ipv4_accel_pending_clear(struct ecm_front_end_connection_instance *feci, ecm_front_end_acceleration_mode_t mode)
156{
157	bool decel_pending;
158	spin_lock_bh(&feci->lock);
159	spin_lock_bh(&ecm_sfe_ipv4_lock);
160	decel_pending = _ecm_sfe_ipv4_accel_pending_clear(feci, mode);
161	spin_unlock_bh(&ecm_sfe_ipv4_lock);
162	spin_unlock_bh(&feci->lock);
163	return decel_pending;
164}
165
166extern int ecm_sfe_ipv4_conntrack_event(unsigned long events, struct nf_conn *ct);
167extern void ecm_sfe_ipv4_accel_done_time_update(struct ecm_front_end_connection_instance *feci);
168extern void ecm_sfe_ipv4_decel_done_time_update(struct ecm_front_end_connection_instance *feci);
169extern struct ecm_classifier_instance *ecm_sfe_ipv4_assign_classifier(struct ecm_db_connection_instance *ci, ecm_classifier_type_t type);
170extern bool ecm_sfe_ipv4_reclassify(struct ecm_db_connection_instance *ci, int assignment_count, struct ecm_classifier_instance *assignments[]);
171extern void ecm_sfe_ipv4_connection_regenerate(struct ecm_db_connection_instance *ci, ecm_tracker_sender_type_t sender,
172							struct net_device *out_dev, struct net_device *out_dev_nat,
173							struct net_device *in_dev, struct net_device *in_dev_nat, __be16 *layer4hdr);
174extern struct ecm_db_node_instance *ecm_sfe_ipv4_node_establish_and_ref(struct ecm_front_end_connection_instance *feci,
175							struct net_device *dev, ip_addr_t addr,
176							struct ecm_db_iface_instance *interface_list[], int32_t interface_list_first,
177							uint8_t *given_node_addr);
178extern struct ecm_db_host_instance *ecm_sfe_ipv4_host_establish_and_ref(ip_addr_t addr);
179extern struct ecm_db_mapping_instance *ecm_sfe_ipv4_mapping_establish_and_ref(ip_addr_t addr, int port);
180extern int ecm_sfe_ipv4_init(struct dentry *dentry);
181extern void ecm_sfe_ipv4_stop(int);
182extern void ecm_sfe_ipv4_exit(void);
183