1/*
2 **************************************************************************
3 * Copyright (c) 2014-2015 The Linux Foundation.  All rights reserved.
4 * Permission to use, copy, modify, and/or distribute this software for
5 * any purpose with or without fee is hereby granted, provided that the
6 * above copyright notice and this permission notice appear in all copies.
7 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
8 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
9 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
10 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
11 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
12 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
13 * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
14 **************************************************************************
15 */
16
17#include <linux/version.h>
18#include <linux/types.h>
19#include <linux/ip.h>
20#include <linux/tcp.h>
21#include <linux/module.h>
22#include <linux/skbuff.h>
23#include <linux/icmp.h>
24#include <linux/kthread.h>
25#include <linux/pkt_sched.h>
26#include <linux/debugfs.h>
27#include <linux/string.h>
28#include <net/route.h>
29#include <net/ip.h>
30#include <net/tcp.h>
31#include <asm/unaligned.h>
32#include <asm/uaccess.h>	/* for put_user */
33#include <net/ipv6.h>
34#include <linux/inet.h>
35#include <linux/in.h>
36#include <linux/udp.h>
37#include <linux/tcp.h>
38
39#include <linux/inetdevice.h>
40#include <linux/if_arp.h>
41#include <linux/netfilter_ipv4.h>
42#include <linux/netfilter_bridge.h>
43#include <linux/if_bridge.h>
44#include <net/arp.h>
45#include <net/netfilter/nf_conntrack.h>
46#include <net/netfilter/nf_conntrack_acct.h>
47#include <net/netfilter/nf_conntrack_helper.h>
48#include <net/netfilter/nf_conntrack_l4proto.h>
49#include <net/netfilter/nf_conntrack_l3proto.h>
50#include <net/netfilter/nf_conntrack_zones.h>
51#include <net/netfilter/nf_conntrack_core.h>
52#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
53#include <net/netfilter/ipv4/nf_defrag_ipv4.h>
54#ifdef ECM_INTERFACE_VLAN_ENABLE
55#include <linux/../../net/8021q/vlan.h>
56#include <linux/if_vlan.h>
57#endif
58
59/*
60 * Debug output levels
61 * 0 = OFF
62 * 1 = ASSERTS / ERRORS
63 * 2 = 1 + WARN
64 * 3 = 2 + INFO
65 * 4 = 3 + TRACE
66 */
67#define DEBUG_LEVEL ECM_NSS_PORTED_IPV4_DEBUG_LEVEL
68
69#include <nss_api_if.h>
70
71#include "ecm_types.h"
72#include "ecm_db_types.h"
73#include "ecm_state.h"
74#include "ecm_tracker.h"
75#include "ecm_classifier.h"
76#include "ecm_front_end_types.h"
77#include "ecm_tracker_datagram.h"
78#include "ecm_tracker_udp.h"
79#include "ecm_tracker_tcp.h"
80#include "ecm_db.h"
81#include "ecm_classifier_default.h"
82#include "ecm_interface.h"
83#include "ecm_nss_ported_ipv4.h"
84#include "ecm_nss_ipv4.h"
85#include "ecm_nss_common.h"
86
87/*
88 * Magic numbers
89 */
90#define ECM_NSS_PORTED_IPV4_CONNECTION_INSTANCE_MAGIC 0xEB12
91
92/*
93 * Protocol type that ported file supports.
94 */
95enum ecm_nss_ported_ipv4_proto_types {
96	ECM_NSS_PORTED_IPV4_PROTO_TCP = 0,
97	ECM_NSS_PORTED_IPV4_PROTO_UDP,
98	ECM_NSS_PORTED_IPV4_PROTO_MAX
99
100};
101
102/*
103 * struct ecm_nss_ipv4_ported_connection_instance
104 *	A connection specific front end instance for PORTED connections
105 */
106struct ecm_nss_ported_ipv4_connection_instance {
107	struct ecm_front_end_connection_instance base;		/* Base class */
108	uint8_t ported_accelerated_count_index;			/* Index value of accelerated count array (UDP or TCP) */
109#if (DEBUG_LEVEL > 0)
110	uint16_t magic;
111#endif
112};
113
static int ecm_nss_ported_ipv4_accelerated_count[ECM_NSS_PORTED_IPV4_PROTO_MAX] = {0};
						/* Array of Number of TCP and UDP connections currently offloaded,
						 * indexed by enum ecm_nss_ported_ipv4_proto_types.
						 * Protected by ecm_nss_ipv4_lock (see increments in the callback below). */

/*
 * Expose what should be a static flag in the TCP connection tracker.
 * NOTE(review): these conntrack tunables appear to be consulted to relax
 * TCP window tracking for accelerated flows — confirm against the kernel's
 * nf_conntrack_proto_tcp patches shipped with this package.
 */
#ifdef ECM_OPENWRT_SUPPORT
extern int nf_ct_tcp_no_window_check;
#endif
extern int nf_ct_tcp_be_liberal;
124
125/*
126 * ecm_nss_ported_ipv4_connection_callback()
127 *	Callback for handling create ack/nack calls.
128 */
129static void ecm_nss_ported_ipv4_connection_callback(void *app_data, struct nss_ipv4_msg *nim)
130{
131	struct nss_ipv4_rule_create_msg * __attribute__((unused)) nircm = &nim->msg.rule_create;
132	uint32_t serial = (uint32_t)app_data;
133	struct ecm_db_connection_instance *ci;
134	struct ecm_front_end_connection_instance *feci;
135	struct ecm_nss_ported_ipv4_connection_instance *npci;
136	ecm_front_end_acceleration_mode_t result_mode;
137
138	/*
139	 * Is this a response to a create message?
140	 */
141	if (nim->cm.type != NSS_IPV4_TX_CREATE_RULE_MSG) {
142		DEBUG_ERROR("%p: ported create callback with improper type: %d, serial: %u\n", nim, nim->cm.type, serial);
143		return;
144	}
145
146	/*
147	 * Look up ecm connection so that we can update the status.
148	 */
149	ci = ecm_db_connection_serial_find_and_ref(serial);
150	if (!ci) {
151		DEBUG_TRACE("%p: create callback, connection not found, serial: %u\n", nim, serial);
152		return;
153	}
154
155	/*
156	 * Release ref held for this ack/nack response.
157	 * NOTE: It's okay to do this here, ci won't go away, because the ci is held as
158	 * a result of the ecm_db_connection_serial_find_and_ref()
159	 */
160	ecm_db_connection_deref(ci);
161
162	/*
163	 * Get the front end instance
164	 */
165	feci = ecm_db_connection_front_end_get_and_ref(ci);
166	npci = (struct ecm_nss_ported_ipv4_connection_instance *)feci;
167	DEBUG_CHECK_MAGIC(npci, ECM_NSS_PORTED_IPV4_CONNECTION_INSTANCE_MAGIC, "%p: magic failed", npci);
168
169	/*
170	 * Record command duration
171	 */
172	ecm_nss_ipv4_accel_done_time_update(feci);
173
174	/*
175	 * Dump some useful trace information.
176	 */
177	DEBUG_TRACE("%p: accelerate response for connection: %p, serial: %u\n", npci, feci->ci, serial);
178	DEBUG_TRACE("%p: rule_flags: %x, valid_flags: %x\n", npci, nircm->rule_flags, nircm->valid_flags);
179	DEBUG_TRACE("%p: flow_ip: %pI4h:%d\n", npci, &nircm->tuple.flow_ip, nircm->tuple.flow_ident);
180	DEBUG_TRACE("%p: return_ip: %pI4h:%d\n", npci, &nircm->tuple.return_ip, nircm->tuple.return_ident);
181	DEBUG_TRACE("%p: protocol: %d\n", npci, nircm->tuple.protocol);
182
183	/*
184	 * Handle the creation result code.
185	 */
186	DEBUG_TRACE("%p: response: %d\n", npci, nim->cm.response);
187	if (nim->cm.response != NSS_CMN_RESPONSE_ACK) {
188		/*
189		 * Creation command failed (specific reason ignored).
190		 */
191		DEBUG_TRACE("%p: accel nack: %d\n", npci, nim->cm.error);
192		spin_lock_bh(&feci->lock);
193		DEBUG_ASSERT(feci->accel_mode == ECM_FRONT_END_ACCELERATION_MODE_ACCEL_PENDING, "%p: Unexpected mode: %d\n", ci, feci->accel_mode);
194		feci->stats.ae_nack++;
195		feci->stats.ae_nack_total++;
196		if (feci->stats.ae_nack >= feci->stats.ae_nack_limit) {
197			/*
198			 * Too many NSS rejections
199			 */
200			result_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_ACCEL_ENGINE;
201		} else {
202			/*
203			 * Revert to decelerated
204			 */
205			result_mode = ECM_FRONT_END_ACCELERATION_MODE_DECEL;
206		}
207
208		/*
209		 * If connection is now defunct then set mode to ensure no further accel attempts occur
210		 */
211		if (feci->is_defunct) {
212			result_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DEFUNCT;
213		}
214
215		spin_lock_bh(&ecm_nss_ipv4_lock);
216		_ecm_nss_ipv4_accel_pending_clear(feci, result_mode);
217		spin_unlock_bh(&ecm_nss_ipv4_lock);
218
219		spin_unlock_bh(&feci->lock);
220
221		/*
222		 * Release the connection.
223		 */
224		feci->deref(feci);
225		ecm_db_connection_deref(ci);
226		return;
227	}
228
229	spin_lock_bh(&feci->lock);
230	DEBUG_ASSERT(feci->accel_mode == ECM_FRONT_END_ACCELERATION_MODE_ACCEL_PENDING, "%p: Unexpected mode: %d\n", ci, feci->accel_mode);
231
232	/*
233	 * If a flush occured before we got the ACK then our acceleration was effectively cancelled on us
234	 * GGG TODO This is a workaround for a NSS message OOO quirk, this should eventually be removed.
235	 */
236	if (feci->stats.flush_happened) {
237		feci->stats.flush_happened = false;
238
239		/*
240		 * Increment the no-action counter.  Our connection was decelerated on us with no action occurring.
241		 */
242		feci->stats.no_action_seen++;
243
244		spin_lock_bh(&ecm_nss_ipv4_lock);
245		_ecm_nss_ipv4_accel_pending_clear(feci, ECM_FRONT_END_ACCELERATION_MODE_DECEL);
246		spin_unlock_bh(&ecm_nss_ipv4_lock);
247
248		spin_unlock_bh(&feci->lock);
249
250		/*
251		 * Release the connection.
252		 */
253		feci->deref(feci);
254		ecm_db_connection_deref(ci);
255		return;
256	}
257
258	/*
259	 * Create succeeded
260	 */
261
262	/*
263	 * Clear any nack count
264	 */
265	feci->stats.ae_nack = 0;
266
267	/*
268	 * Clear the "accelerate pending" state and move to "accelerated" state bumping
269	 * the accelerated counters to match our new state.
270	 *
271	 * Decelerate may have been attempted while we were "pending accel" and
272	 * this function will return true if that was the case.
273	 * If decelerate was pending then we need to begin deceleration :-(
274	 */
275	spin_lock_bh(&ecm_nss_ipv4_lock);
276
277	ecm_nss_ported_ipv4_accelerated_count[npci->ported_accelerated_count_index]++;	/* Protocol specific counter */
278	ecm_nss_ipv4_accelerated_count++;		/* General running counter */
279
280	if (!_ecm_nss_ipv4_accel_pending_clear(feci, ECM_FRONT_END_ACCELERATION_MODE_ACCEL)) {
281		/*
282		 * Increment the no-action counter, this is reset if offload action is seen
283		 */
284		feci->stats.no_action_seen++;
285
286		spin_unlock_bh(&ecm_nss_ipv4_lock);
287		spin_unlock_bh(&feci->lock);
288
289		/*
290		 * Release the connection.
291		 */
292		feci->deref(feci);
293		ecm_db_connection_deref(ci);
294		return;
295	}
296
297	DEBUG_INFO("%p: Decelerate was pending\n", ci);
298
299	spin_unlock_bh(&ecm_nss_ipv4_lock);
300	spin_unlock_bh(&feci->lock);
301
302	feci->decelerate(feci);
303
304	/*
305	 * Release the connection.
306	 */
307	feci->deref(feci);
308	ecm_db_connection_deref(ci);
309}
310
311/*
312 * ecm_nss_ported_ipv4_connection_accelerate()
313 *	Accelerate a connection
314 */
315static void ecm_nss_ported_ipv4_connection_accelerate(struct ecm_front_end_connection_instance *feci,
316									struct ecm_classifier_process_response *pr, bool is_l2_encap,
317									struct nf_conn *ct)
318{
319	struct ecm_nss_ported_ipv4_connection_instance *npci = (struct ecm_nss_ported_ipv4_connection_instance *)feci;
320	uint16_t regen_occurrances;
321	int protocol;
322	int32_t from_ifaces_first;
323	int32_t to_ifaces_first;
324	struct ecm_db_iface_instance *from_ifaces[ECM_DB_IFACE_HEIRARCHY_MAX];
325	struct ecm_db_iface_instance *to_ifaces[ECM_DB_IFACE_HEIRARCHY_MAX];
326	struct ecm_db_iface_instance *from_nss_iface;
327	struct ecm_db_iface_instance *to_nss_iface;
328	int32_t from_nss_iface_id;
329	int32_t to_nss_iface_id;
330	uint8_t from_nss_iface_address[ETH_ALEN];
331	uint8_t to_nss_iface_address[ETH_ALEN];
332	ip_addr_t addr;
333	struct nss_ipv4_msg nim;
334	struct nss_ipv4_rule_create_msg *nircm;
335	struct ecm_classifier_instance *assignments[ECM_CLASSIFIER_TYPES];
336	int aci_index;
337	int assignment_count;
338	nss_tx_status_t nss_tx_status;
339	int32_t list_index;
340	int32_t interface_type_counts[ECM_DB_IFACE_TYPE_COUNT];
341	bool rule_invalid;
342	uint8_t dest_mac_xlate[ETH_ALEN];
343	ecm_db_direction_t ecm_dir;
344	ecm_front_end_acceleration_mode_t result_mode;
345
346	DEBUG_CHECK_MAGIC(npci, ECM_NSS_PORTED_IPV4_CONNECTION_INSTANCE_MAGIC, "%p: magic failed", npci);
347
348	/*
349	 * Get the re-generation occurrance counter of the connection.
350	 * We compare it again at the end - to ensure that the rule construction has seen no generation
351	 * changes during rule creation.
352	 */
353	regen_occurrances = ecm_db_connection_regeneration_occurrances_get(feci->ci);
354
355	/*
356	 * Test if acceleration is permitted
357	 */
358	if (!ecm_nss_ipv4_accel_pending_set(feci)) {
359		DEBUG_TRACE("%p: Acceleration not permitted: %p\n", feci, feci->ci);
360		return;
361	}
362
363	/*
364	 * Okay construct an accel command.
365	 * Initialise creation structure.
366	 * NOTE: We leverage the app_data void pointer to be our 32 bit connection serial number.
367	 * When we get it back we re-cast it to a uint32 and do a faster connection lookup.
368	 */
369	memset(&nim, 0, sizeof(struct nss_ipv4_msg));
370	nss_ipv4_msg_init(&nim, NSS_IPV4_RX_INTERFACE, NSS_IPV4_TX_CREATE_RULE_MSG,
371			sizeof(struct nss_ipv4_rule_create_msg),
372			ecm_nss_ported_ipv4_connection_callback,
373			(void *)ecm_db_connection_serial_get(feci->ci));
374
375	nircm = &nim.msg.rule_create;
376	nircm->valid_flags = 0;
377	nircm->rule_flags = 0;
378
379	/*
380	 * Initialize VLAN tag information
381	 */
382	nircm->vlan_primary_rule.ingress_vlan_tag = ECM_NSS_CONNMGR_VLAN_ID_NOT_CONFIGURED;
383	nircm->vlan_primary_rule.egress_vlan_tag = ECM_NSS_CONNMGR_VLAN_ID_NOT_CONFIGURED;
384	nircm->vlan_secondary_rule.ingress_vlan_tag = ECM_NSS_CONNMGR_VLAN_ID_NOT_CONFIGURED;
385	nircm->vlan_secondary_rule.egress_vlan_tag = ECM_NSS_CONNMGR_VLAN_ID_NOT_CONFIGURED;
386
387	/*
388	 * Get the interface lists of the connection, we must have at least one interface in the list to continue
389	 */
390	from_ifaces_first = ecm_db_connection_from_interfaces_get_and_ref(feci->ci, from_ifaces);
391	if (from_ifaces_first == ECM_DB_IFACE_HEIRARCHY_MAX) {
392		DEBUG_WARN("%p: Accel attempt failed - no interfaces in from_interfaces list!\n", feci);
393		goto ported_accel_bad_rule;
394	}
395
396	to_ifaces_first = ecm_db_connection_to_interfaces_get_and_ref(feci->ci, to_ifaces);
397	if (to_ifaces_first == ECM_DB_IFACE_HEIRARCHY_MAX) {
398		DEBUG_WARN("%p: Accel attempt failed - no interfaces in to_interfaces list!\n", npci);
399		ecm_db_connection_interfaces_deref(from_ifaces, from_ifaces_first);
400		goto ported_accel_bad_rule;
401	}
402
403	/*
404	 * First interface in each must be a known nss interface
405	 */
406	from_nss_iface = from_ifaces[from_ifaces_first];
407	to_nss_iface = to_ifaces[to_ifaces_first];
408	from_nss_iface_id = ecm_db_iface_ae_interface_identifier_get(from_nss_iface);
409	to_nss_iface_id = ecm_db_iface_ae_interface_identifier_get(to_nss_iface);
410	if ((from_nss_iface_id < 0) || (to_nss_iface_id < 0)) {
411		DEBUG_TRACE("%p: from_nss_iface_id: %d, to_nss_iface_id: %d\n", npci, from_nss_iface_id, to_nss_iface_id);
412		ecm_db_connection_interfaces_deref(from_ifaces, from_ifaces_first);
413		ecm_db_connection_interfaces_deref(to_ifaces, to_ifaces_first);
414		goto ported_accel_bad_rule;
415	}
416
417	/*
418	 * New rule being created
419	 */
420	nircm->valid_flags |= NSS_IPV4_RULE_CREATE_CONN_VALID;
421
422	/*
423	 * Set interface numbers involved in accelerating this connection.
424	 * These are the outer facing addresses from the heirarchy interface lists we got above.
425	 * These may be overridden later if we detect special interface types e.g. ipsec.
426	 */
427	nircm->conn_rule.flow_interface_num = from_nss_iface_id;
428	nircm->conn_rule.return_interface_num = to_nss_iface_id;
429
430	/*
431	 * We know that each outward facing interface is known to the NSS and so this connection could be accelerated.
432	 * However the lists may also specify other interesting details that must be included in the creation command,
433	 * for example, ethernet MAC, VLAN tagging or PPPoE session information.
434	 * We get this information by walking from the outer to the innermost interface for each list and examine the interface types.
435	 *
436	 * Start with the 'from' (src) side.
437	 * NOTE: The lists may contain a complex heirarchy of similar type of interface e.g. multiple vlans or tunnels within tunnels.
438	 * This NSS cannot handle that - there is no way to describe this in the rule - if we see multiple types that would conflict we have to abort.
439	 */
440	DEBUG_TRACE("%p: Examine from/src heirarchy list\n", npci);
441	memset(interface_type_counts, 0, sizeof(interface_type_counts));
442	rule_invalid = false;
443	for (list_index = from_ifaces_first; !rule_invalid && (list_index < ECM_DB_IFACE_HEIRARCHY_MAX); list_index++) {
444		struct ecm_db_iface_instance *ii;
445		ecm_db_iface_type_t ii_type;
446		char *ii_name;
447
448		ii = from_ifaces[list_index];
449		ii_type = ecm_db_connection_iface_type_get(ii);
450		ii_name = ecm_db_interface_type_to_string(ii_type);
451		DEBUG_TRACE("%p: list_index: %d, ii: %p, type: %d (%s)\n", npci, list_index, ii, ii_type, ii_name);
452
453		/*
454		 * Extract information from this interface type if it is applicable to the rule.
455		 * Conflicting information may cause accel to be unsupported.
456		 */
457		switch (ii_type) {
458#ifdef ECM_INTERFACE_PPP_ENABLE
459			struct ecm_db_interface_info_pppoe pppoe_info;
460#endif
461#ifdef ECM_INTERFACE_VLAN_ENABLE
462			struct ecm_db_interface_info_vlan vlan_info;
463			uint32_t vlan_value = 0;
464			struct net_device *vlan_in_dev = NULL;
465#endif
466		case ECM_DB_IFACE_TYPE_BRIDGE:
467			DEBUG_TRACE("%p: Bridge\n", npci);
468			if (interface_type_counts[ii_type] != 0) {
469				/*
470				 * Cannot cascade bridges
471				 */
472				rule_invalid = true;
473				DEBUG_TRACE("%p: Bridge - ignore additional\n", npci);
474				break;
475			}
476			ecm_db_iface_bridge_address_get(ii, from_nss_iface_address);
477			if (is_valid_ether_addr(from_nss_iface_address)) {
478				memcpy(nircm->src_mac_rule.flow_src_mac, from_nss_iface_address, ETH_ALEN);
479				nircm->src_mac_rule.mac_valid_flags |= NSS_IPV4_SRC_MAC_FLOW_VALID;
480				nircm->valid_flags |= NSS_IPV4_RULE_CREATE_SRC_MAC_VALID;
481			}
482
483			DEBUG_TRACE("%p: Bridge - mac: %pM\n", npci, from_nss_iface_address);
484			break;
485		case ECM_DB_IFACE_TYPE_ETHERNET:
486			DEBUG_TRACE("%p: Ethernet\n", npci);
487			if (interface_type_counts[ii_type] != 0) {
488				/*
489				 * Ignore additional mac addresses, these are usually as a result of address propagation
490				 * from bridges down to ports etc.
491				 */
492				DEBUG_TRACE("%p: Ethernet - ignore additional\n", npci);
493				break;
494			}
495
496			/*
497			 * Can only handle one MAC, the first outermost mac.
498			 */
499			ecm_db_iface_ethernet_address_get(ii, from_nss_iface_address);
500			DEBUG_TRACE("%p: Ethernet - mac: %pM\n", npci, from_nss_iface_address);
501			break;
502		case ECM_DB_IFACE_TYPE_PPPOE:
503#ifdef ECM_INTERFACE_PPP_ENABLE
504			/*
505			 * More than one PPPoE in the list is not valid!
506			 */
507			if (interface_type_counts[ii_type] != 0) {
508				DEBUG_TRACE("%p: PPPoE - additional unsupported\n", npci);
509				rule_invalid = true;
510				break;
511			}
512
513			/*
514			 * Copy pppoe session info to the creation structure.
515			 */
516			ecm_db_iface_pppoe_session_info_get(ii, &pppoe_info);
517
518			nircm->pppoe_rule.flow_pppoe_session_id = pppoe_info.pppoe_session_id;
519			memcpy(nircm->pppoe_rule.flow_pppoe_remote_mac, pppoe_info.remote_mac, ETH_ALEN);
520			nircm->valid_flags |= NSS_IPV4_RULE_CREATE_PPPOE_VALID;
521
522			DEBUG_TRACE("%p: PPPoE - session: %x, mac: %pM\n", npci,
523					nircm->pppoe_rule.flow_pppoe_session_id,
524					nircm->pppoe_rule.flow_pppoe_remote_mac);
525#else
526			rule_invalid = true;
527#endif
528			break;
529		case ECM_DB_IFACE_TYPE_VLAN:
530#ifdef ECM_INTERFACE_VLAN_ENABLE
531			DEBUG_TRACE("%p: VLAN\n", npci);
532			if (interface_type_counts[ii_type] > 1) {
533				/*
534				 * Can only support two vlans
535				 */
536				rule_invalid = true;
537				DEBUG_TRACE("%p: VLAN - additional unsupported\n", npci);
538				break;
539			}
540			ecm_db_iface_vlan_info_get(ii, &vlan_info);
541			vlan_value = ((vlan_info.vlan_tpid << 16) | vlan_info.vlan_tag);
542
543			/*
544			 * Look up the vlan device and incorporate the vlan priority into the vlan_value
545			 */
546			vlan_in_dev = dev_get_by_index(&init_net, ecm_db_iface_interface_identifier_get(ii));
547			if (vlan_in_dev) {
548				vlan_value |= vlan_dev_get_egress_prio(vlan_in_dev, pr->return_qos_tag);
549				dev_put(vlan_in_dev);
550				vlan_in_dev = NULL;
551			}
552
553			/*
554			 * Primary or secondary (QinQ) VLAN?
555			 */
556			if (interface_type_counts[ii_type] == 0) {
557				nircm->vlan_primary_rule.ingress_vlan_tag = vlan_value;
558			} else {
559				nircm->vlan_secondary_rule.ingress_vlan_tag = vlan_value;
560			}
561			nircm->valid_flags |= NSS_IPV4_RULE_CREATE_VLAN_VALID;
562
563			/*
564			 * If we have not yet got an ethernet mac then take this one (very unlikely as mac should have been propagated to the slave (outer) device
565			 */
566			if (interface_type_counts[ECM_DB_IFACE_TYPE_ETHERNET] == 0) {
567				memcpy(from_nss_iface_address, vlan_info.address, ETH_ALEN);
568				interface_type_counts[ECM_DB_IFACE_TYPE_ETHERNET]++;
569				DEBUG_TRACE("%p: VLAN use mac: %pM\n", npci, from_nss_iface_address);
570			}
571			DEBUG_TRACE("%p: vlan tag: %x\n", npci, vlan_value);
572#else
573			rule_invalid = true;
574			DEBUG_TRACE("%p: VLAN - unsupported\n", npci);
575#endif
576			break;
577		case ECM_DB_IFACE_TYPE_IPSEC_TUNNEL:
578#ifdef ECM_INTERFACE_IPSEC_ENABLE
579			DEBUG_TRACE("%p: IPSEC\n", npci);
580			if (interface_type_counts[ii_type] != 0) {
581				/*
582				 * Can only support one ipsec
583				 */
584				rule_invalid = true;
585				DEBUG_TRACE("%p: IPSEC - additional unsupported\n", npci);
586				break;
587			}
588			nircm->conn_rule.flow_interface_num = NSS_C2C_TX_INTERFACE;
589#else
590			rule_invalid = true;
591			DEBUG_TRACE("%p: IPSEC - unsupported\n", npci);
592#endif
593			break;
594		default:
595			DEBUG_TRACE("%p: Ignoring: %d (%s)\n", npci, ii_type, ii_name);
596		}
597
598		/*
599		 * Seen an interface of this type
600		 */
601		interface_type_counts[ii_type]++;
602	}
603	if (rule_invalid) {
604		DEBUG_WARN("%p: from/src Rule invalid\n", npci);
605		ecm_db_connection_interfaces_deref(from_ifaces, from_ifaces_first);
606		ecm_db_connection_interfaces_deref(to_ifaces, to_ifaces_first);
607		goto ported_accel_bad_rule;
608	}
609
610	/*
611	 * Now examine the TO / DEST heirarchy list to construct the destination part of the rule
612	 */
613	DEBUG_TRACE("%p: Examine to/dest heirarchy list\n", npci);
614	memset(interface_type_counts, 0, sizeof(interface_type_counts));
615	rule_invalid = false;
616	for (list_index = to_ifaces_first; !rule_invalid && (list_index < ECM_DB_IFACE_HEIRARCHY_MAX); list_index++) {
617		struct ecm_db_iface_instance *ii;
618		ecm_db_iface_type_t ii_type;
619		char *ii_name;
620
621		ii = to_ifaces[list_index];
622		ii_type = ecm_db_connection_iface_type_get(ii);
623		ii_name = ecm_db_interface_type_to_string(ii_type);
624		DEBUG_TRACE("%p: list_index: %d, ii: %p, type: %d (%s)\n", npci, list_index, ii, ii_type, ii_name);
625
626		/*
627		 * Extract information from this interface type if it is applicable to the rule.
628		 * Conflicting information may cause accel to be unsupported.
629		 */
630		switch (ii_type) {
631#ifdef ECM_INTERFACE_PPP_ENABLE
632			struct ecm_db_interface_info_pppoe pppoe_info;
633#endif
634#ifdef ECM_INTERFACE_VLAN_ENABLE
635			struct ecm_db_interface_info_vlan vlan_info;
636			uint32_t vlan_value = 0;
637			struct net_device *vlan_out_dev = NULL;
638#endif
639		case ECM_DB_IFACE_TYPE_BRIDGE:
640			DEBUG_TRACE("%p: Bridge\n", npci);
641			if (interface_type_counts[ii_type] != 0) {
642				/*
643				 * Cannot cascade bridges
644				 */
645				rule_invalid = true;
646				DEBUG_TRACE("%p: Bridge - ignore additional\n", npci);
647				break;
648			}
649			ecm_db_iface_bridge_address_get(ii, to_nss_iface_address);
650			if (is_valid_ether_addr(to_nss_iface_address)) {
651				memcpy(nircm->src_mac_rule.return_src_mac, to_nss_iface_address, ETH_ALEN);
652				nircm->src_mac_rule.mac_valid_flags |= NSS_IPV4_SRC_MAC_RETURN_VALID;
653				nircm->valid_flags |= NSS_IPV4_RULE_CREATE_SRC_MAC_VALID;
654			}
655
656			DEBUG_TRACE("%p: Bridge - mac: %pM\n", npci, to_nss_iface_address);
657			break;
658		case ECM_DB_IFACE_TYPE_ETHERNET:
659			DEBUG_TRACE("%p: Ethernet\n", npci);
660			if (interface_type_counts[ii_type] != 0) {
661				/*
662				 * Ignore additional mac addresses, these are usually as a result of address propagation
663				 * from bridges down to ports etc.
664				 */
665				DEBUG_TRACE("%p: Ethernet - ignore additional\n", npci);
666				break;
667			}
668
669			/*
670			 * Can only handle one MAC, the first outermost mac.
671			 */
672			ecm_db_iface_ethernet_address_get(ii, to_nss_iface_address);
673			DEBUG_TRACE("%p: Ethernet - mac: %pM\n", npci, to_nss_iface_address);
674			break;
675		case ECM_DB_IFACE_TYPE_PPPOE:
676#ifdef ECM_INTERFACE_PPP_ENABLE
677			/*
678			 * More than one PPPoE in the list is not valid!
679			 */
680			if (interface_type_counts[ii_type] != 0) {
681				DEBUG_TRACE("%p: PPPoE - additional unsupported\n", npci);
682				rule_invalid = true;
683				break;
684			}
685
686			/*
687			 * Copy pppoe session info to the creation structure.
688			 */
689			ecm_db_iface_pppoe_session_info_get(ii, &pppoe_info);
690			nircm->pppoe_rule.return_pppoe_session_id = pppoe_info.pppoe_session_id;
691			memcpy(nircm->pppoe_rule.return_pppoe_remote_mac, pppoe_info.remote_mac, ETH_ALEN);
692			nircm->valid_flags |= NSS_IPV4_RULE_CREATE_PPPOE_VALID;
693
694			DEBUG_TRACE("%p: PPPoE - session: %x, mac: %pM\n", npci,
695				    nircm->pppoe_rule.return_pppoe_session_id,
696				    nircm->pppoe_rule.return_pppoe_remote_mac);
697#else
698			rule_invalid = true;
699#endif
700			break;
701		case ECM_DB_IFACE_TYPE_VLAN:
702#ifdef ECM_INTERFACE_VLAN_ENABLE
703			DEBUG_TRACE("%p: VLAN\n", npci);
704			if (interface_type_counts[ii_type] > 1) {
705				/*
706				 * Can only support two vlans
707				 */
708				rule_invalid = true;
709				DEBUG_TRACE("%p: VLAN - additional unsupported\n", npci);
710				break;
711			}
712			ecm_db_iface_vlan_info_get(ii, &vlan_info);
713			vlan_value = ((vlan_info.vlan_tpid << 16) | vlan_info.vlan_tag);
714
715			/*
716			 * Look up the vlan device and incorporate the vlan priority into the vlan_value
717			 */
718			vlan_out_dev = dev_get_by_index(&init_net, ecm_db_iface_interface_identifier_get(ii));
719			if (vlan_out_dev) {
720				vlan_value |= vlan_dev_get_egress_prio(vlan_out_dev, pr->flow_qos_tag);
721				dev_put(vlan_out_dev);
722				vlan_out_dev = NULL;
723			}
724
725			/*
726			 * Primary or secondary (QinQ) VLAN?
727			 */
728			if (interface_type_counts[ii_type] == 0) {
729				nircm->vlan_primary_rule.egress_vlan_tag = vlan_value;
730			} else {
731				nircm->vlan_secondary_rule.egress_vlan_tag = vlan_value;
732			}
733			nircm->valid_flags |= NSS_IPV4_RULE_CREATE_VLAN_VALID;
734
735			/*
736			 * If we have not yet got an ethernet mac then take this one (very unlikely as mac should have been propagated to the slave (outer) device
737			 */
738			if (interface_type_counts[ECM_DB_IFACE_TYPE_ETHERNET] == 0) {
739				memcpy(to_nss_iface_address, vlan_info.address, ETH_ALEN);
740				interface_type_counts[ECM_DB_IFACE_TYPE_ETHERNET]++;
741				DEBUG_TRACE("%p: VLAN use mac: %pM\n", npci, to_nss_iface_address);
742			}
743			DEBUG_TRACE("%p: vlan tag: %x\n", npci, vlan_value);
744#else
745			rule_invalid = true;
746			DEBUG_TRACE("%p: VLAN - unsupported\n", npci);
747#endif
748			break;
749		case ECM_DB_IFACE_TYPE_IPSEC_TUNNEL:
750#ifdef ECM_INTERFACE_IPSEC_ENABLE
751			DEBUG_TRACE("%p: IPSEC\n", npci);
752			if (interface_type_counts[ii_type] != 0) {
753				/*
754				 * Can only support one ipsec
755				 */
756				rule_invalid = true;
757				DEBUG_TRACE("%p: IPSEC - additional unsupported\n", npci);
758				break;
759			}
760			nircm->conn_rule.return_interface_num = NSS_C2C_TX_INTERFACE;
761#else
762			rule_invalid = true;
763			DEBUG_TRACE("%p: IPSEC - unsupported\n", npci);
764#endif
765			break;
766		default:
767			DEBUG_TRACE("%p: Ignoring: %d (%s)\n", npci, ii_type, ii_name);
768		}
769
770		/*
771		 * Seen an interface of this type
772		 */
773		interface_type_counts[ii_type]++;
774	}
775	if (rule_invalid) {
776		DEBUG_WARN("%p: from/src Rule invalid\n", npci);
777		ecm_db_connection_interfaces_deref(from_ifaces, from_ifaces_first);
778		ecm_db_connection_interfaces_deref(to_ifaces, to_ifaces_first);
779		goto ported_accel_bad_rule;
780	}
781
782	/*
783	 * Routed or bridged?
784	 */
785	if (ecm_db_connection_is_routed_get(feci->ci)) {
786		nircm->rule_flags |= NSS_IPV4_RULE_CREATE_FLAG_ROUTED;
787	} else {
788		nircm->rule_flags |= NSS_IPV4_RULE_CREATE_FLAG_BRIDGE_FLOW;
789		if (is_l2_encap) {
790			nircm->rule_flags |= NSS_IPV4_RULE_CREATE_FLAG_L2_ENCAP;
791		}
792	}
793
794	/*
795	 * Set up the flow and return qos tags
796	 */
797	nircm->qos_rule.flow_qos_tag = (uint32_t)pr->flow_qos_tag;
798	nircm->qos_rule.return_qos_tag = (uint32_t)pr->return_qos_tag;
799	nircm->valid_flags |= NSS_IPV4_RULE_CREATE_QOS_VALID;
800
801#ifdef ECM_CLASSIFIER_DSCP_ENABLE
802	/*
803	 * DSCP information?
804	 */
805	if (pr->process_actions & ECM_CLASSIFIER_PROCESS_ACTION_DSCP) {
806		nircm->dscp_rule.flow_dscp = pr->flow_dscp;
807		nircm->dscp_rule.return_dscp = pr->return_dscp;
808		nircm->rule_flags |= NSS_IPV4_RULE_CREATE_FLAG_DSCP_MARKING;
809		nircm->valid_flags |= NSS_IPV4_RULE_CREATE_DSCP_MARKING_VALID;
810	}
811#endif
812	protocol = ecm_db_connection_protocol_get(feci->ci);
813
814	/*
815	 * Set protocol
816	 */
817	nircm->tuple.protocol = (int32_t)protocol;
818
819	/*
820	 * The flow_ip is where the connection established from
821	 */
822	ecm_db_connection_from_address_get(feci->ci, addr);
823	ECM_IP_ADDR_TO_HIN4_ADDR(nircm->tuple.flow_ip, addr);
824
825	/*
826	 * The return_ip is where the connection is established to, however, in the case of ingress
827	 * the return_ip would be the routers WAN IP - i.e. the NAT'ed version.
828	 * Getting the NAT'ed version here works for ingress or egress packets, for egress
829	 * the NAT'ed version would be the same as the normal address
830	 */
831	ecm_db_connection_to_address_nat_get(feci->ci, addr);
832	ECM_IP_ADDR_TO_HIN4_ADDR(nircm->tuple.return_ip, addr);
833
834	/*
835	 * When the packet is forwarded to the next interface get the address the source IP of the
836	 * packet should be translated to.  For egress this is the NAT'ed from address.
837	 * This also works for ingress as the NAT'ed version of the WAN host would be the same as non-NAT'ed
838	 */
839	ecm_db_connection_from_address_nat_get(feci->ci, addr);
840	ECM_IP_ADDR_TO_HIN4_ADDR(nircm->conn_rule.flow_ip_xlate, addr);
841
842	/*
843	 * The destination address is what the destination IP is translated to as it is forwarded to the next interface.
844	 * For egress this would yield the normal wan host and for ingress this would correctly NAT back to the LAN host
845	 */
846	ecm_db_connection_to_address_get(feci->ci, addr);
847	ECM_IP_ADDR_TO_HIN4_ADDR(nircm->conn_rule.return_ip_xlate, addr);
848
849	/*
850	 * Same approach as above for port information
851	 */
852	nircm->tuple.flow_ident = ecm_db_connection_from_port_get(feci->ci);
853	nircm->tuple.return_ident = ecm_db_connection_to_port_nat_get(feci->ci);
854	nircm->conn_rule.flow_ident_xlate = ecm_db_connection_from_port_nat_get(feci->ci);
855	nircm->conn_rule.return_ident_xlate = ecm_db_connection_to_port_get(feci->ci);
856
857	/*
858	 * Get mac addresses.
859	 * The src_mac is the mac address of the node that established the connection.
860	 * This will work whether the from_node is LAN (egress) or WAN (ingress).
861	 */
862	ecm_db_connection_from_node_address_get(feci->ci, (uint8_t *)nircm->conn_rule.flow_mac);
863
864	/*
865	 * The dest_mac is more complex.  For egress it is the node address of the 'to' side of the connection.
866	 * For ingress it is the node adress of the NAT'ed 'to' IP.
867	 * Essentially it is the MAC of node associated with create.dest_ip and this is "to nat" side.
868	 */
869	ecm_db_connection_to_nat_node_address_get(feci->ci, (uint8_t *)nircm->conn_rule.return_mac);
870
871	/*
872	 * The dest_mac_xlate is the mac address to replace the pkt.dst_mac when a packet is sent to->from
873	 * For bridged connections this does not change.
874	 * For routed connections this is the mac of the 'to' node side of the connection.
875	 */
876	if (ecm_db_connection_is_routed_get(feci->ci)) {
877		ecm_db_connection_to_node_address_get(feci->ci, dest_mac_xlate);
878	} else {
879		/*
880		 * Bridge flows preserve the MAC addressing
881		 */
882		memcpy(dest_mac_xlate, (uint8_t *)nircm->conn_rule.return_mac, ETH_ALEN);
883	}
884
885	/*
886	 * Refer to the Example 2 and 3 in ecm_nss_ipv4_ip_process() function for egress
887	 * and ingress NAT'ed cases. In these cases, the destination node is the one which has the
888	 * ip_dest_addr. So, above we get the mac address of this host and use that mac address
889	 * for the destination node address in NAT'ed cases.
890	 */
891	ecm_dir = ecm_db_connection_direction_get(feci->ci);
892	if ((ecm_dir == ECM_DB_DIRECTION_INGRESS_NAT) || (ecm_dir == ECM_DB_DIRECTION_EGRESS_NAT)) {
893		memcpy(nircm->conn_rule.return_mac, dest_mac_xlate, ETH_ALEN);
894	}
895
896	/*
897	 * Get MTU information
898	 */
899	nircm->conn_rule.flow_mtu = (uint32_t)ecm_db_connection_from_iface_mtu_get(feci->ci);
900	nircm->conn_rule.return_mtu = (uint32_t)ecm_db_connection_to_iface_mtu_get(feci->ci);
901
902	if (protocol == IPPROTO_TCP) {
903		/*
904		 * Need window scaling and remarking information if available
905		 * Start by looking up the conntrack connection
906		 */
907		if (!ct) {
908			/*
909			 * No conntrack so no need to check window sequence space
910			 */
911			DEBUG_TRACE("%p: TCP Accel no ct from conn %p to get window data\n", npci, feci->ci);
912			nircm->rule_flags |= NSS_IPV4_RULE_CREATE_FLAG_NO_SEQ_CHECK;
913		} else {
914			spin_lock_bh(&ct->lock);
915			DEBUG_TRACE("%p: TCP Accel Get window data from ct %p for conn %p\n", npci, ct, feci->ci);
916
917			nircm->tcp_rule.flow_window_scale = ct->proto.tcp.seen[0].td_scale;
918			nircm->tcp_rule.flow_max_window = ct->proto.tcp.seen[0].td_maxwin;
919			nircm->tcp_rule.flow_end = ct->proto.tcp.seen[0].td_end;
920			nircm->tcp_rule.flow_max_end = ct->proto.tcp.seen[0].td_maxend;
921			nircm->tcp_rule.return_window_scale = ct->proto.tcp.seen[1].td_scale;
922			nircm->tcp_rule.return_max_window = ct->proto.tcp.seen[1].td_maxwin;
923			nircm->tcp_rule.return_end = ct->proto.tcp.seen[1].td_end;
924			nircm->tcp_rule.return_max_end = ct->proto.tcp.seen[1].td_maxend;
925#ifdef ECM_OPENWRT_SUPPORT
926			if (nf_ct_tcp_be_liberal || nf_ct_tcp_no_window_check
927#else
928			if (nf_ct_tcp_be_liberal
929#endif
930					|| (ct->proto.tcp.seen[0].flags & IP_CT_TCP_FLAG_BE_LIBERAL)
931					|| (ct->proto.tcp.seen[1].flags & IP_CT_TCP_FLAG_BE_LIBERAL)) {
932				nircm->rule_flags |= NSS_IPV4_RULE_CREATE_FLAG_NO_SEQ_CHECK;
933			}
934			spin_unlock_bh(&ct->lock);
935		}
936
937		nircm->valid_flags |= NSS_IPV4_RULE_CREATE_TCP_VALID;
938	}
939
940	/*
941	 * Sync our creation command from the assigned classifiers to get specific additional creation rules.
942	 * NOTE: These are called in ascending order of priority and so the last classifier (highest) shall
943	 * override any preceding classifiers.
944	 * This also gives the classifiers a chance to see that acceleration is being attempted.
945	 */
946	assignment_count = ecm_db_connection_classifier_assignments_get_and_ref(feci->ci, assignments);
947	for (aci_index = 0; aci_index < assignment_count; ++aci_index) {
948		struct ecm_classifier_instance *aci;
949		struct ecm_classifier_rule_create ecrc;
950		/*
951		 * NOTE: The current classifiers do not sync anything to the underlying accel engines.
952		 * In the future, if any of the classifiers wants to pass any parameter, these parameters
953		 * should be received via this object and copied to the accel engine's create object (nircm).
954		*/
955		aci = assignments[aci_index];
956		DEBUG_TRACE("%p: sync from: %p, type: %d\n", npci, aci, aci->type_get(aci));
957		aci->sync_from_v4(aci, &ecrc);
958	}
959	ecm_db_connection_assignments_release(assignment_count, assignments);
960
961	/*
962	 * Release the interface lists
963	 */
964	ecm_db_connection_interfaces_deref(from_ifaces, from_ifaces_first);
965	ecm_db_connection_interfaces_deref(to_ifaces, to_ifaces_first);
966
967	DEBUG_INFO("%p: Ported Accelerate connection %p\n"
968			"Protocol: %d\n"
969			"from_mtu: %u\n"
970			"to_mtu: %u\n"
971			"from_ip: %pI4h:%d\n"
972			"to_ip: %pI4h:%d\n"
973			"from_ip_xlate: %pI4h:%d\n"
974			"to_ip_xlate: %pI4h:%d\n"
975			"from_mac: %pM\n"
976			"to_mac: %pM\n"
977			"src_iface_num: %u\n"
978			"dest_iface_num: %u\n"
979			"ingress_inner_vlan_tag: %u\n"
980			"egress_inner_vlan_tag: %u\n"
981			"ingress_outer_vlan_tag: %u\n"
982			"egress_outer_vlan_tag: %u\n"
983			"rule_flags: %x\n"
984			"valid_flags: %x\n"
985			"return_pppoe_session_id: %u\n"
986			"return_pppoe_remote_mac: %pM\n"
987			"flow_pppoe_session_id: %u\n"
988			"flow_pppoe_remote_mac: %pM\n"
989			"flow_qos_tag: %x (%u)\n"
990			"return_qos_tag: %x (%u)\n"
991			"flow_window_scale: %u\n"
992			"flow_max_window: %u\n"
993			"flow_end: %u\n"
994			"flow_max_end: %u\n"
995			"return_window_scale: %u\n"
996			"return_max_window: %u\n"
997			"return_end: %u\n"
998			"return_max_end: %u\n"
999			"flow_dscp: %x\n"
1000			"return_dscp: %x\n",
1001			npci,
1002			feci->ci,
1003			nircm->tuple.protocol,
1004			nircm->conn_rule.flow_mtu,
1005			nircm->conn_rule.return_mtu,
1006			&nircm->tuple.flow_ip, nircm->tuple.flow_ident,
1007			&nircm->tuple.return_ip, nircm->tuple.return_ident,
1008			&nircm->conn_rule.flow_ip_xlate, nircm->conn_rule.flow_ident_xlate,
1009			&nircm->conn_rule.return_ip_xlate, nircm->conn_rule.return_ident_xlate,
1010			nircm->conn_rule.flow_mac,
1011			nircm->conn_rule.return_mac,
1012			nircm->conn_rule.flow_interface_num,
1013			nircm->conn_rule.return_interface_num,
1014			nircm->vlan_primary_rule.ingress_vlan_tag,
1015			nircm->vlan_primary_rule.egress_vlan_tag,
1016			nircm->vlan_secondary_rule.ingress_vlan_tag,
1017			nircm->vlan_secondary_rule.egress_vlan_tag,
1018			nircm->rule_flags,
1019			nircm->valid_flags,
1020			nircm->pppoe_rule.return_pppoe_session_id,
1021			nircm->pppoe_rule.return_pppoe_remote_mac,
1022			nircm->pppoe_rule.flow_pppoe_session_id,
1023			nircm->pppoe_rule.flow_pppoe_remote_mac,
1024			nircm->qos_rule.flow_qos_tag, nircm->qos_rule.flow_qos_tag,
1025			nircm->qos_rule.return_qos_tag, nircm->qos_rule.return_qos_tag,
1026			nircm->tcp_rule.flow_window_scale,
1027			nircm->tcp_rule.flow_max_window,
1028			nircm->tcp_rule.flow_end,
1029			nircm->tcp_rule.flow_max_end,
1030			nircm->tcp_rule.return_window_scale,
1031			nircm->tcp_rule.return_max_window,
1032			nircm->tcp_rule.return_end,
1033			nircm->tcp_rule.return_max_end,
1034			nircm->dscp_rule.flow_dscp,
1035			nircm->dscp_rule.return_dscp);
1036
1037	if (protocol == IPPROTO_TCP) {
1038
1039		DEBUG_INFO("flow_window_scale: %u\n"
1040			"flow_max_window: %u\n"
1041			"flow_end: %u\n"
1042			"flow_max_end: %u\n"
1043			"return_window_scale: %u\n"
1044			"return_max_window: %u\n"
1045			"return_end: %u\n"
1046			"return_max_end: %u\n",
1047			nircm->tcp_rule.flow_window_scale,
1048			nircm->tcp_rule.flow_max_window,
1049			nircm->tcp_rule.flow_end,
1050			nircm->tcp_rule.flow_max_end,
1051			nircm->tcp_rule.return_window_scale,
1052			nircm->tcp_rule.return_max_window,
1053			nircm->tcp_rule.return_end,
1054			nircm->tcp_rule.return_max_end);
1055	}
1056
1057	/*
1058	 * Now that the rule has been constructed we re-compare the generation occurrance counter.
1059	 * If there has been a change then we abort because the rule may have been created using
1060	 * unstable data - especially if another thread has begun regeneration of the connection state.
1061	 * NOTE: This does not prevent a regen from being flagged immediately after this line of code either,
1062	 * or while the acceleration rule is in flight to the nss.
1063	 * This is only to check for consistency of rule state - not that the state is stale.
1064	 * Remember that the connection is marked as "accel pending state" so if a regen is flagged immediately
1065	 * after this check passes, the connection will be decelerated and refreshed very quickly.
1066	 */
1067	if (regen_occurrances != ecm_db_connection_regeneration_occurrances_get(feci->ci)) {
1068		DEBUG_INFO("%p: connection:%p regen occurred - aborting accel rule.\n", feci, feci->ci);
1069		ecm_nss_ipv4_accel_pending_clear(feci, ECM_FRONT_END_ACCELERATION_MODE_DECEL);
1070		return;
1071	}
1072
1073	/*
1074	 * Ref the connection before issuing an NSS rule
1075	 * This ensures that when the NSS responds to the command - which may even be immediately -
1076	 * the callback function can trust the correct ref was taken for its purpose.
1077	 * NOTE: remember that this will also implicitly hold the feci.
1078	 */
1079	ecm_db_connection_ref(feci->ci);
1080
1081	/*
1082	 * We are about to issue the command, record the time of transmission
1083	 */
1084	spin_lock_bh(&feci->lock);
1085	feci->stats.cmd_time_begun = jiffies;
1086	spin_unlock_bh(&feci->lock);
1087
1088	/*
1089	 * Call the rule create function
1090	 */
1091	nss_tx_status = nss_ipv4_tx(ecm_nss_ipv4_nss_ipv4_mgr, &nim);
1092	if (nss_tx_status == NSS_TX_SUCCESS) {
1093		/*
1094		 * Reset the driver_fail count - transmission was okay here.
1095		 */
1096		spin_lock_bh(&feci->lock);
1097		feci->stats.driver_fail = 0;
1098		spin_unlock_bh(&feci->lock);
1099		return;
1100	}
1101
1102	/*
1103	 * Release that ref!
1104	 */
1105	ecm_db_connection_deref(feci->ci);
1106
1107	/*
1108	 * TX failed
1109	 */
1110	spin_lock_bh(&feci->lock);
1111	DEBUG_ASSERT(feci->accel_mode == ECM_FRONT_END_ACCELERATION_MODE_ACCEL_PENDING, "%p: Accel mode unexpected: %d\n", feci, feci->accel_mode);
1112	feci->stats.driver_fail_total++;
1113	feci->stats.driver_fail++;
1114	if (feci->stats.driver_fail >= feci->stats.driver_fail_limit) {
1115		DEBUG_WARN("%p: Accel failed - driver fail limit\n", npci);
1116		result_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DRIVER;
1117	} else {
1118		result_mode = ECM_FRONT_END_ACCELERATION_MODE_DECEL;
1119	}
1120
1121	spin_lock_bh(&ecm_nss_ipv4_lock);
1122	_ecm_nss_ipv4_accel_pending_clear(feci, result_mode);
1123	spin_unlock_bh(&ecm_nss_ipv4_lock);
1124
1125	spin_unlock_bh(&feci->lock);
1126	return;
1127
1128ported_accel_bad_rule:
1129	;
1130
1131	/*
1132	 * Jump to here when rule data is bad and an offload command cannot be constructed
1133	 */
1134	DEBUG_WARN("%p: Accel failed - bad rule\n", npci);
1135	ecm_nss_ipv4_accel_pending_clear(feci, ECM_FRONT_END_ACCELERATION_MODE_FAIL_RULE);
1136}
1137
1138/*
1139 * ecm_nss_ported_ipv4_connection_destroy_callback()
1140 *	Callback for handling destroy ack/nack calls.
1141 */
1142static void ecm_nss_ported_ipv4_connection_destroy_callback(void *app_data, struct nss_ipv4_msg *nim)
1143{
1144	struct nss_ipv4_rule_destroy_msg * __attribute__((unused))nirdm = &nim->msg.rule_destroy;
1145	uint32_t serial = (uint32_t)app_data;
1146	struct ecm_db_connection_instance *ci;
1147	struct ecm_front_end_connection_instance *feci;
1148	struct ecm_nss_ported_ipv4_connection_instance *npci;
1149
1150	/*
1151	 * Is this a response to a destroy message?
1152	 */
1153	if (nim->cm.type != NSS_IPV4_TX_DESTROY_RULE_MSG) {
1154		DEBUG_ERROR("%p: ported destroy callback with improper type: %d\n", nim, nim->cm.type);
1155		return;
1156	}
1157
1158	/*
1159	 * Look up ecm connection so that we can update the status.
1160	 */
1161	ci = ecm_db_connection_serial_find_and_ref(serial);
1162	if (!ci) {
1163		DEBUG_TRACE("%p: destroy callback, connection not found, serial: %u\n", nim, serial);
1164		return;
1165	}
1166
1167	/*
1168	 * Release ref held for this ack/nack response.
1169	 * NOTE: It's okay to do this here, ci won't go away, because the ci is held as
1170	 * a result of the ecm_db_connection_serial_find_and_ref()
1171	 */
1172	ecm_db_connection_deref(ci);
1173
1174	/*
1175	 * Get the front end instance
1176	 */
1177	feci = ecm_db_connection_front_end_get_and_ref(ci);
1178	npci = (struct ecm_nss_ported_ipv4_connection_instance *)feci;
1179	DEBUG_CHECK_MAGIC(npci, ECM_NSS_PORTED_IPV4_CONNECTION_INSTANCE_MAGIC, "%p: magic failed", npci);
1180
1181	/*
1182	 * Record command duration
1183	 */
1184	ecm_nss_ipv4_decel_done_time_update(feci);
1185
1186	/*
1187	 * Dump some useful trace information.
1188	 */
1189	DEBUG_TRACE("%p: decelerate response for connection: %p\n", npci, feci->ci);
1190	DEBUG_TRACE("%p: flow_ip: %pI4h:%d\n", npci, &nirdm->tuple.flow_ip, nirdm->tuple.flow_ident);
1191	DEBUG_TRACE("%p: return_ip: %pI4h:%d\n", npci, &nirdm->tuple.return_ip, nirdm->tuple.return_ident);
1192	DEBUG_TRACE("%p: protocol: %d\n", npci, nirdm->tuple.protocol);
1193
1194	/*
1195	 * Drop decel pending counter
1196	 */
1197	spin_lock_bh(&ecm_nss_ipv4_lock);
1198	ecm_nss_ipv4_pending_decel_count--;
1199	DEBUG_ASSERT(ecm_nss_ipv4_pending_decel_count >= 0, "Bad decel pending counter\n");
1200	spin_unlock_bh(&ecm_nss_ipv4_lock);
1201
1202	spin_lock_bh(&feci->lock);
1203
1204	/*
1205	 * If decel is not still pending then it's possible that the NSS ended acceleration by some other reason e.g. flush
1206	 * In which case we cannot rely on the response we get here.
1207	 */
1208	if (feci->accel_mode != ECM_FRONT_END_ACCELERATION_MODE_DECEL_PENDING) {
1209		spin_unlock_bh(&feci->lock);
1210
1211		/*
1212		 * Release the connections.
1213		 */
1214		feci->deref(feci);
1215		ecm_db_connection_deref(ci);
1216		return;
1217	}
1218
1219	DEBUG_TRACE("%p: response: %d\n", npci, nim->cm.response);
1220	if (nim->cm.response != NSS_CMN_RESPONSE_ACK) {
1221		feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DECEL;
1222	} else {
1223		feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_DECEL;
1224	}
1225
1226	/*
1227	 * If connection became defunct then set mode so that no further accel/decel attempts occur.
1228	 */
1229	if (feci->is_defunct) {
1230		feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DEFUNCT;
1231	}
1232
1233	spin_unlock_bh(&feci->lock);
1234
1235	/*
1236	 * Ported acceleration ends
1237	 */
1238	spin_lock_bh(&ecm_nss_ipv4_lock);
1239	ecm_nss_ported_ipv4_accelerated_count[npci->ported_accelerated_count_index]--;	/* Protocol specific counter */
1240	DEBUG_ASSERT(ecm_nss_ported_ipv4_accelerated_count[npci->ported_accelerated_count_index] >= 0, "Bad udp accel counter\n");
1241	ecm_nss_ipv4_accelerated_count--;		/* General running counter */
1242	DEBUG_ASSERT(ecm_nss_ipv4_accelerated_count >= 0, "Bad accel counter\n");
1243	spin_unlock_bh(&ecm_nss_ipv4_lock);
1244
1245	/*
1246	 * Release the connections.
1247	 */
1248	feci->deref(feci);
1249	ecm_db_connection_deref(ci);
1250}
1251
1252/*
1253 * ecm_nss_ported_ipv4_connection_decelerate()
1254 *	Decelerate a connection
1255 */
1256static void ecm_nss_ported_ipv4_connection_decelerate(struct ecm_front_end_connection_instance *feci)
1257{
1258	struct ecm_nss_ported_ipv4_connection_instance *npci = (struct ecm_nss_ported_ipv4_connection_instance *)feci;
1259	struct nss_ipv4_msg nim;
1260	struct nss_ipv4_rule_destroy_msg *nirdm;
1261	ip_addr_t addr;
1262	nss_tx_status_t nss_tx_status;
1263
1264	DEBUG_CHECK_MAGIC(npci, ECM_NSS_PORTED_IPV4_CONNECTION_INSTANCE_MAGIC, "%p: magic failed", npci);
1265
1266	/*
1267	 * If decelerate is in error or already pending then ignore
1268	 */
1269	spin_lock_bh(&feci->lock);
1270	if (feci->stats.decelerate_pending) {
1271		spin_unlock_bh(&feci->lock);
1272		return;
1273	}
1274
1275	/*
1276	 * If acceleration is pending then we cannot decelerate right now or we will race with it
1277	 * Set a decelerate pending flag that will be actioned when the acceleration command is complete.
1278	 */
1279	if (feci->accel_mode == ECM_FRONT_END_ACCELERATION_MODE_ACCEL_PENDING) {
1280		feci->stats.decelerate_pending = true;
1281		spin_unlock_bh(&feci->lock);
1282		return;
1283	}
1284
1285	/*
1286	 * Can only decelerate if accelerated
1287	 * NOTE: This will also deny accel when the connection is in fail condition too.
1288	 */
1289	if (feci->accel_mode != ECM_FRONT_END_ACCELERATION_MODE_ACCEL) {
1290		spin_unlock_bh(&feci->lock);
1291		return;
1292	}
1293
1294	/*
1295	 * Initiate deceleration
1296	 */
1297	feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_DECEL_PENDING;
1298	spin_unlock_bh(&feci->lock);
1299
1300	/*
1301	 * Increment the decel pending counter
1302	 */
1303	spin_lock_bh(&ecm_nss_ipv4_lock);
1304	ecm_nss_ipv4_pending_decel_count++;
1305	spin_unlock_bh(&ecm_nss_ipv4_lock);
1306
1307	/*
1308	 * Prepare deceleration message
1309	 */
1310	nss_ipv4_msg_init(&nim, NSS_IPV4_RX_INTERFACE, NSS_IPV4_TX_DESTROY_RULE_MSG,
1311			sizeof(struct nss_ipv4_rule_destroy_msg),
1312			ecm_nss_ported_ipv4_connection_destroy_callback,
1313			(void *)ecm_db_connection_serial_get(feci->ci));
1314
1315	nirdm = &nim.msg.rule_destroy;
1316	nirdm->tuple.protocol = (int32_t)ecm_db_connection_protocol_get(feci->ci);
1317
1318	/*
1319	 * Get addressing information
1320	 */
1321	ecm_db_connection_from_address_get(feci->ci, addr);
1322	ECM_IP_ADDR_TO_HIN4_ADDR(nirdm->tuple.flow_ip, addr);
1323	ecm_db_connection_to_address_nat_get(feci->ci, addr);
1324	ECM_IP_ADDR_TO_HIN4_ADDR(nirdm->tuple.return_ip, addr);
1325	nirdm->tuple.flow_ident = ecm_db_connection_from_port_get(feci->ci);
1326	nirdm->tuple.return_ident = ecm_db_connection_to_port_nat_get(feci->ci);
1327
1328	DEBUG_INFO("%p: Ported Connection %p decelerate\n"
1329			"protocol: %d\n"
1330			"src_ip: %pI4:%d\n"
1331			"dest_ip: %pI4:%d\n",
1332			npci, feci->ci, nirdm->tuple.protocol,
1333			&nirdm->tuple.flow_ip, nirdm->tuple.flow_ident,
1334			&nirdm->tuple.return_ip, nirdm->tuple.return_ident);
1335
1336	/*
1337	 * Take a ref to the feci->ci so that it will persist until we get a response from the NSS.
1338	 * NOTE: This will implicitly hold the feci too.
1339	 */
1340	ecm_db_connection_ref(feci->ci);
1341
1342	/*
1343	 * We are about to issue the command, record the time of transmission
1344	 */
1345	spin_lock_bh(&feci->lock);
1346	feci->stats.cmd_time_begun = jiffies;
1347	spin_unlock_bh(&feci->lock);
1348
1349	/*
1350	 * Destroy the NSS connection cache entry.
1351	 */
1352	nss_tx_status = nss_ipv4_tx(ecm_nss_ipv4_nss_ipv4_mgr, &nim);
1353	if (nss_tx_status == NSS_TX_SUCCESS) {
1354		/*
1355		 * Reset the driver_fail count - transmission was okay here.
1356		 */
1357		spin_lock_bh(&feci->lock);
1358		feci->stats.driver_fail = 0;
1359		spin_unlock_bh(&feci->lock);
1360		return;
1361	}
1362
1363	/*
1364	 * Release the ref take, NSS driver did not accept our command.
1365	 */
1366	ecm_db_connection_deref(feci->ci);
1367
1368	/*
1369	 * TX failed
1370	 */
1371	spin_lock_bh(&feci->lock);
1372	feci->stats.driver_fail_total++;
1373	feci->stats.driver_fail++;
1374	if (feci->stats.driver_fail >= feci->stats.driver_fail_limit) {
1375		DEBUG_WARN("%p: Decel failed - driver fail limit\n", npci);
1376		feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DRIVER;
1377	}
1378	spin_unlock_bh(&feci->lock);
1379
1380	/*
1381	 * Could not send the request, decrement the decel pending counter
1382	 */
1383	spin_lock_bh(&ecm_nss_ipv4_lock);
1384	ecm_nss_ipv4_pending_decel_count--;
1385	DEBUG_ASSERT(ecm_nss_ipv4_pending_decel_count >= 0, "Bad decel pending counter\n");
1386	spin_unlock_bh(&ecm_nss_ipv4_lock);
1387}
1388
1389/*
1390 * ecm_nss_ported_ipv4_connection_defunct_callback()
1391 *	Callback to be called when a ported connection has become defunct.
1392 */
1393static void ecm_nss_ported_ipv4_connection_defunct_callback(void *arg)
1394{
1395	struct ecm_front_end_connection_instance *feci = (struct ecm_front_end_connection_instance *)arg;
1396	struct ecm_nss_ported_ipv4_connection_instance *npci = (struct ecm_nss_ported_ipv4_connection_instance *)feci;
1397
1398	DEBUG_CHECK_MAGIC(npci, ECM_NSS_PORTED_IPV4_CONNECTION_INSTANCE_MAGIC, "%p: magic failed", npci);
1399
1400	spin_lock_bh(&feci->lock);
1401
1402	/*
1403	 * If connection has already become defunct, do nothing.
1404	 */
1405	if (feci->is_defunct) {
1406		spin_unlock_bh(&feci->lock);
1407		return;
1408	}
1409	feci->is_defunct = true;
1410
1411	/*
1412	 * If the connection is already in one of the fail modes, do nothing, keep the current accel_mode.
1413	 */
1414	if (ECM_FRONT_END_ACCELERATION_FAILED(feci->accel_mode)) {
1415		spin_unlock_bh(&feci->lock);
1416		return;
1417	}
1418
1419	/*
1420	 * If the connection is decel then ensure it will not attempt accel while defunct.
1421	 */
1422	if (feci->accel_mode == ECM_FRONT_END_ACCELERATION_MODE_DECEL) {
1423		feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DEFUNCT;
1424		spin_unlock_bh(&feci->lock);
1425		return;
1426	}
1427
1428	/*
1429	 * If the connection is decel pending then decel operation is in progress anyway.
1430	 */
1431	if (feci->accel_mode == ECM_FRONT_END_ACCELERATION_MODE_DECEL_PENDING) {
1432		spin_unlock_bh(&feci->lock);
1433		return;
1434	}
1435
1436	/*
1437	 * If none of the cases matched above, this means the connection is in one of the
1438	 * accel modes (accel or accel_pending) so we force a deceleration.
1439	 * NOTE: If the mode is accel pending then the decel will be actioned when that is completed.
1440	 */
1441	spin_unlock_bh(&feci->lock);
1442	ecm_nss_ported_ipv4_connection_decelerate(feci);
1443}
1444
1445/*
1446 * ecm_nss_ported_ipv4_connection_accel_state_get()
1447 *	Get acceleration state
1448 */
1449static ecm_front_end_acceleration_mode_t ecm_nss_ported_ipv4_connection_accel_state_get(struct ecm_front_end_connection_instance *feci)
1450{
1451	struct ecm_nss_ported_ipv4_connection_instance *npci = (struct ecm_nss_ported_ipv4_connection_instance *)feci;
1452	ecm_front_end_acceleration_mode_t state;
1453
1454	DEBUG_CHECK_MAGIC(npci, ECM_NSS_PORTED_IPV4_CONNECTION_INSTANCE_MAGIC, "%p: magic failed", npci);
1455	spin_lock_bh(&feci->lock);
1456	state = feci->accel_mode;
1457	spin_unlock_bh(&feci->lock);
1458	return state;
1459}
1460
1461/*
1462 * ecm_nss_ported_ipv4_connection_action_seen()
1463 *	Acceleration action / activity has been seen for this connection.
1464 *
1465 * NOTE: Call the action_seen() method when the NSS has demonstrated that it has offloaded some data for a connection.
1466 */
1467static void ecm_nss_ported_ipv4_connection_action_seen(struct ecm_front_end_connection_instance *feci)
1468{
1469	struct ecm_nss_ported_ipv4_connection_instance *npci = (struct ecm_nss_ported_ipv4_connection_instance *)feci;
1470
1471	DEBUG_CHECK_MAGIC(npci, ECM_NSS_PORTED_IPV4_CONNECTION_INSTANCE_MAGIC, "%p: magic failed", npci);
1472	DEBUG_INFO("%p: Action seen\n", npci);
1473	spin_lock_bh(&feci->lock);
1474	feci->stats.no_action_seen = 0;
1475	spin_unlock_bh(&feci->lock);
1476}
1477
1478/*
1479 * ecm_nss_ported_ipv4_connection_accel_ceased()
1480 *	NSS has indicated that acceleration has stopped.
1481 *
1482 * NOTE: This is called in response to an NSS self-initiated termination of acceleration.
1483 * This must NOT be called because the ECM terminated the acceleration.
1484 */
static void ecm_nss_ported_ipv4_connection_accel_ceased(struct ecm_front_end_connection_instance *feci)
{
	struct ecm_nss_ported_ipv4_connection_instance *npci = (struct ecm_nss_ported_ipv4_connection_instance *)feci;

	DEBUG_CHECK_MAGIC(npci, ECM_NSS_PORTED_IPV4_CONNECTION_INSTANCE_MAGIC, "%p: magic failed", npci);
	DEBUG_INFO("%p: accel ceased\n", npci);

	spin_lock_bh(&feci->lock);

	/*
	 * If we are in accel-pending state then the NSS has issued a flush out-of-order
	 * with the ACK/NACK we are actually waiting for.
	 * To work around this we record a "flush has already happened" and will action it when we finally get that ACK/NACK.
	 * GGG TODO This should eventually be removed when the NSS honours messaging sequence.
	 */
	if (feci->accel_mode == ECM_FRONT_END_ACCELERATION_MODE_ACCEL_PENDING) {
		feci->stats.flush_happened = true;
		feci->stats.flush_happened_total++;
		spin_unlock_bh(&feci->lock);
		return;
	}

	/*
	 * If connection is no longer accelerated by the time we get here just ignore the command
	 */
	if (feci->accel_mode != ECM_FRONT_END_ACCELERATION_MODE_ACCEL) {
		spin_unlock_bh(&feci->lock);
		return;
	}

	/*
	 * If the no_action_seen counter was not reset then acceleration ended without any offload action
	 * (action_seen() clears the counter whenever the NSS reports offloaded traffic).
	 */
	if (feci->stats.no_action_seen) {
		feci->stats.no_action_seen_total++;
	}

	/*
	 * If the no_action_seen indicates successive cessations of acceleration without any offload action occuring
	 * then we fail out this connection
	 */
	if (feci->stats.no_action_seen >= feci->stats.no_action_seen_limit) {
		feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_NO_ACTION;
	} else {
		feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_DECEL;
	}
	spin_unlock_bh(&feci->lock);

	/*
	 * Ported acceleration ends
	 * NOTE: The global counters are protected by ecm_nss_ipv4_lock, taken
	 * only after feci->lock has been released (never nested the other way).
	 */
	spin_lock_bh(&ecm_nss_ipv4_lock);
	ecm_nss_ported_ipv4_accelerated_count[npci->ported_accelerated_count_index]--;	/* Protocol specific counter */
	DEBUG_ASSERT(ecm_nss_ported_ipv4_accelerated_count[npci->ported_accelerated_count_index] >= 0, "Bad ported accel counter\n");
	ecm_nss_ipv4_accelerated_count--;		/* General running counter */
	DEBUG_ASSERT(ecm_nss_ipv4_accelerated_count >= 0, "Bad accel counter\n");
	spin_unlock_bh(&ecm_nss_ipv4_lock);
}
1543
1544/*
1545 * ecm_nss_ported_ipv4_connection_ref()
1546 *	Ref a connection front end instance
1547 */
1548static void ecm_nss_ported_ipv4_connection_ref(struct ecm_front_end_connection_instance *feci)
1549{
1550	struct ecm_nss_ported_ipv4_connection_instance *npci = (struct ecm_nss_ported_ipv4_connection_instance *)feci;
1551
1552	DEBUG_CHECK_MAGIC(npci, ECM_NSS_PORTED_IPV4_CONNECTION_INSTANCE_MAGIC, "%p: magic failed", npci);
1553	spin_lock_bh(&feci->lock);
1554	feci->refs++;
1555	DEBUG_TRACE("%p: npci ref %d\n", feci, feci->refs);
1556	DEBUG_ASSERT(feci->refs > 0, "%p: ref wrap\n", feci);
1557	spin_unlock_bh(&feci->lock);
1558}
1559
1560/*
1561 * ecm_nss_ported_ipv4_connection_deref()
1562 *	Deref a connection front end instance
1563 */
1564static int ecm_nss_ported_ipv4_connection_deref(struct ecm_front_end_connection_instance *feci)
1565{
1566	struct ecm_nss_ported_ipv4_connection_instance *npci = (struct ecm_nss_ported_ipv4_connection_instance *)feci;
1567
1568	DEBUG_CHECK_MAGIC(npci, ECM_NSS_PORTED_IPV4_CONNECTION_INSTANCE_MAGIC, "%p: magic failed", npci);
1569
1570	spin_lock_bh(&feci->lock);
1571	feci->refs--;
1572	DEBUG_ASSERT(feci->refs >= 0, "%p: ref wrap\n", feci);
1573
1574	if (feci->refs > 0) {
1575		int refs = feci->refs;
1576		spin_unlock_bh(&feci->lock);
1577		DEBUG_TRACE("%p: npci deref %d\n", npci, refs);
1578		return refs;
1579	}
1580	spin_unlock_bh(&feci->lock);
1581
1582	/*
1583	 * We can now destroy the instance
1584	 */
1585	DEBUG_TRACE("%p: npci final\n", npci);
1586	DEBUG_CLEAR_MAGIC(npci);
1587	kfree(npci);
1588	return 0;
1589}
1590
#ifdef ECM_STATE_OUTPUT_ENABLE
/*
 * ecm_nss_ported_ipv4_connection_state_get()
 *	Return state of this ported front end instance
 *
 * Writes the front end state to the given state file instance.
 * Returns 0 on success or the first non-zero error from the state writer.
 */
static int ecm_nss_ported_ipv4_connection_state_get(struct ecm_front_end_connection_instance *feci, struct ecm_state_file_instance *sfi)
{
	int result;
	bool can_accel;
	ecm_front_end_acceleration_mode_t accel_mode;
	struct ecm_front_end_connection_mode_stats stats;
	struct ecm_nss_ported_ipv4_connection_instance *npci = (struct ecm_nss_ported_ipv4_connection_instance *)feci;

	DEBUG_CHECK_MAGIC(npci, ECM_NSS_PORTED_IPV4_CONNECTION_INSTANCE_MAGIC, "%p: magic failed", npci);

	/*
	 * Take a consistent snapshot of the state under the instance lock so
	 * all values written below belong to the same moment in time.
	 */
	spin_lock_bh(&feci->lock);
	can_accel = feci->can_accel;
	accel_mode = feci->accel_mode;
	memcpy(&stats, &feci->stats, sizeof(struct ecm_front_end_connection_mode_stats));
	spin_unlock_bh(&feci->lock);

	if ((result = ecm_state_prefix_add(sfi, "front_end_v4.ported"))) {
		return result;
	}

	if ((result = ecm_state_write(sfi, "can_accel", "%d", can_accel))) {
		return result;
	}
	if ((result = ecm_state_write(sfi, "accel_mode", "%d", accel_mode))) {
		return result;
	}
	if ((result = ecm_state_write(sfi, "decelerate_pending", "%d", stats.decelerate_pending))) {
		return result;
	}
	if ((result = ecm_state_write(sfi, "flush_happened_total", "%d", stats.flush_happened_total))) {
		return result;
	}
	if ((result = ecm_state_write(sfi, "no_action_seen_total", "%d", stats.no_action_seen_total))) {
		return result;
	}
	if ((result = ecm_state_write(sfi, "no_action_seen", "%d", stats.no_action_seen))) {
		return result;
	}
	if ((result = ecm_state_write(sfi, "no_action_seen_limit", "%d", stats.no_action_seen_limit))) {
		return result;
	}
	if ((result = ecm_state_write(sfi, "driver_fail_total", "%d", stats.driver_fail_total))) {
		return result;
	}
	if ((result = ecm_state_write(sfi, "driver_fail", "%d", stats.driver_fail))) {
		return result;
	}
	if ((result = ecm_state_write(sfi, "driver_fail_limit", "%d", stats.driver_fail_limit))) {
		return result;
	}
	if ((result = ecm_state_write(sfi, "ae_nack_total", "%d", stats.ae_nack_total))) {
		return result;
	}
	if ((result = ecm_state_write(sfi, "ae_nack", "%d", stats.ae_nack))) {
		return result;
	}
	if ((result = ecm_state_write(sfi, "ae_nack_limit", "%d", stats.ae_nack_limit))) {
		return result;
	}

	return ecm_state_prefix_remove(sfi);
}
#endif
1659
1660/*
1661 * ecm_nss_ported_ipv4_connection_instance_alloc()
1662 *	Create a front end instance specific for ported connection
1663 */
1664static struct ecm_nss_ported_ipv4_connection_instance *ecm_nss_ported_ipv4_connection_instance_alloc(
1665								struct ecm_db_connection_instance *ci,
1666								int protocol,
1667								bool can_accel)
1668{
1669	struct ecm_nss_ported_ipv4_connection_instance *npci;
1670	struct ecm_front_end_connection_instance *feci;
1671
1672	npci = (struct ecm_nss_ported_ipv4_connection_instance *)kzalloc(sizeof(struct ecm_nss_ported_ipv4_connection_instance), GFP_ATOMIC | __GFP_NOWARN);
1673	if (!npci) {
1674		DEBUG_WARN("Ported Front end alloc failed\n");
1675		return NULL;
1676	}
1677
1678	/*
1679	 * Refs is 1 for the creator of the connection
1680	 */
1681	feci = (struct ecm_front_end_connection_instance *)npci;
1682	feci->refs = 1;
1683	DEBUG_SET_MAGIC(npci, ECM_NSS_PORTED_IPV4_CONNECTION_INSTANCE_MAGIC);
1684	spin_lock_init(&feci->lock);
1685
1686	feci->can_accel = can_accel;
1687	feci->accel_mode = (can_accel)? ECM_FRONT_END_ACCELERATION_MODE_DECEL : ECM_FRONT_END_ACCELERATION_MODE_FAIL_DENIED;
1688	spin_lock_bh(&ecm_nss_ipv4_lock);
1689	feci->stats.no_action_seen_limit = ecm_nss_ipv4_no_action_limit_default;
1690	feci->stats.driver_fail_limit = ecm_nss_ipv4_driver_fail_limit_default;
1691	feci->stats.ae_nack_limit = ecm_nss_ipv4_nack_limit_default;
1692	spin_unlock_bh(&ecm_nss_ipv4_lock);
1693
1694	/*
1695	 * Copy reference to connection - no need to ref ci as ci maintains a ref to this instance instead (this instance persists for as long as ci does)
1696	 */
1697	feci->ci = ci;
1698
1699	/*
1700	 * Populate the methods and callbacks
1701	 */
1702	feci->ref = ecm_nss_ported_ipv4_connection_ref;
1703	feci->deref = ecm_nss_ported_ipv4_connection_deref;
1704	feci->decelerate = ecm_nss_ported_ipv4_connection_decelerate;
1705	feci->accel_state_get = ecm_nss_ported_ipv4_connection_accel_state_get;
1706	feci->action_seen = ecm_nss_ported_ipv4_connection_action_seen;
1707	feci->accel_ceased = ecm_nss_ported_ipv4_connection_accel_ceased;
1708#ifdef ECM_STATE_OUTPUT_ENABLE
1709	feci->state_get = ecm_nss_ported_ipv4_connection_state_get;
1710#endif
1711	feci->ae_interface_number_by_dev_get = ecm_nss_common_get_interface_number_by_dev;
1712
1713	if (protocol == IPPROTO_TCP) {
1714		npci->ported_accelerated_count_index = ECM_NSS_PORTED_IPV4_PROTO_TCP;
1715	} else if (protocol == IPPROTO_UDP) {
1716		npci->ported_accelerated_count_index = ECM_NSS_PORTED_IPV4_PROTO_UDP;
1717	} else {
1718		DEBUG_WARN("%p: Wrong protocol: %d\n", npci, protocol);
1719		DEBUG_CLEAR_MAGIC(npci);
1720		kfree(npci);
1721		return NULL;
1722	}
1723
1724	return npci;
1725}
1726
1727/*
1728 * ecm_nss_ported_ipv4_process()
1729 *	Process a ported packet
1730 */
1731unsigned int ecm_nss_ported_ipv4_process(struct net_device *out_dev, struct net_device *out_dev_nat,
1732							struct net_device *in_dev, struct net_device *in_dev_nat,
1733							uint8_t *src_node_addr, uint8_t *src_node_addr_nat,
1734							uint8_t *dest_node_addr, uint8_t *dest_node_addr_nat,
1735							bool can_accel, bool is_routed, bool is_l2_encap, struct sk_buff *skb,
1736							struct ecm_tracker_ip_header *iph,
1737							struct nf_conn *ct, ecm_tracker_sender_type_t sender, ecm_db_direction_t ecm_dir,
1738							struct nf_conntrack_tuple *orig_tuple, struct nf_conntrack_tuple *reply_tuple,
1739							ip_addr_t ip_src_addr, ip_addr_t ip_dest_addr,
1740							ip_addr_t ip_src_addr_nat, ip_addr_t ip_dest_addr_nat)
1741{
1742	struct tcphdr *tcp_hdr;
1743	struct tcphdr tcp_hdr_buff;
1744	struct udphdr *udp_hdr;
1745	struct udphdr udp_hdr_buff;
1746	int src_port;
1747	int src_port_nat;
1748	int dest_port;
1749	int dest_port_nat;
1750	struct ecm_db_connection_instance *ci;
1751	ip_addr_t match_addr;
1752	struct ecm_classifier_instance *assignments[ECM_CLASSIFIER_TYPES];
1753	int aci_index;
1754	int assignment_count;
1755	ecm_db_timer_group_t ci_orig_timer_group;
1756	struct ecm_classifier_process_response prevalent_pr;
1757	int protocol = (int)orig_tuple->dst.protonum;
1758	__be16 *layer4hdr = NULL;
1759
1760	if (protocol == IPPROTO_TCP) {
1761		/*
1762		 * Extract TCP header to obtain port information
1763		 */
1764		tcp_hdr = ecm_tracker_tcp_check_header_and_read(skb, iph, &tcp_hdr_buff);
1765		if (unlikely(!tcp_hdr)) {
1766			DEBUG_WARN("TCP packet header %p\n", skb);
1767			return NF_ACCEPT;
1768		}
1769		//DNI need these port pkt directly for blocksite.
1770		if (ntohs(tcp_hdr->source) == 80 || ntohs(tcp_hdr->dest) == 80 ||
1771			ntohs(tcp_hdr->source) == 119 || ntohs(tcp_hdr->dest) == 119) {
1772			return NF_ACCEPT;
1773		}
1774
1775
1776		layer4hdr = (__be16 *)tcp_hdr;
1777
1778		/*
1779		 * Now extract information, if we have conntrack then use that (which would already be in the tuples)
1780		 */
1781		if (unlikely(!ct)) {
1782			orig_tuple->src.u.tcp.port = tcp_hdr->source;
1783			orig_tuple->dst.u.tcp.port = tcp_hdr->dest;
1784			reply_tuple->src.u.tcp.port = tcp_hdr->dest;
1785			reply_tuple->dst.u.tcp.port = tcp_hdr->source;
1786		}
1787
1788		/*
1789		 * Extract transport port information
1790		 * Refer to the ecm_nss_ipv4_process() for information on how we extract this information.
1791		 */
1792		if (sender == ECM_TRACKER_SENDER_TYPE_SRC) {
1793			if ((ecm_dir == ECM_DB_DIRECTION_EGRESS_NAT) || (ecm_dir == ECM_DB_DIRECTION_NON_NAT)) {
1794				src_port = ntohs(orig_tuple->src.u.tcp.port);
1795				dest_port = ntohs(orig_tuple->dst.u.tcp.port);
1796				dest_port_nat = ntohs(reply_tuple->src.u.tcp.port);
1797				src_port_nat = ntohs(reply_tuple->dst.u.tcp.port);
1798			} else if (ecm_dir == ECM_DB_DIRECTION_INGRESS_NAT) {
1799				src_port = ntohs(orig_tuple->src.u.tcp.port);
1800				dest_port_nat = ntohs(orig_tuple->dst.u.tcp.port);
1801				dest_port = ntohs(reply_tuple->src.u.tcp.port);
1802				src_port_nat = ntohs(reply_tuple->dst.u.tcp.port);
1803			} else if (ecm_dir == ECM_DB_DIRECTION_BRIDGED) {
1804				src_port = ntohs(orig_tuple->src.u.tcp.port);
1805				dest_port = ntohs(orig_tuple->dst.u.tcp.port);
1806				dest_port_nat = ntohs(reply_tuple->src.u.tcp.port);
1807				src_port_nat = ntohs(reply_tuple->dst.u.tcp.port);
1808			} else {
1809				DEBUG_ASSERT(false, "Unhandled ecm_dir: %d\n", ecm_dir);
1810			}
1811		} else {
1812			if ((ecm_dir == ECM_DB_DIRECTION_EGRESS_NAT) || (ecm_dir == ECM_DB_DIRECTION_NON_NAT)) {
1813				dest_port = ntohs(orig_tuple->src.u.tcp.port);
1814				src_port = ntohs(orig_tuple->dst.u.tcp.port);
1815				src_port_nat = ntohs(reply_tuple->src.u.tcp.port);
1816				dest_port_nat = ntohs(reply_tuple->dst.u.tcp.port);
1817			} else if (ecm_dir == ECM_DB_DIRECTION_INGRESS_NAT) {
1818				dest_port = ntohs(orig_tuple->src.u.tcp.port);
1819				src_port_nat = ntohs(orig_tuple->dst.u.tcp.port);
1820				src_port = ntohs(reply_tuple->src.u.tcp.port);
1821				dest_port_nat = ntohs(reply_tuple->dst.u.tcp.port);
1822			} else if (ecm_dir == ECM_DB_DIRECTION_BRIDGED) {
1823				dest_port = ntohs(orig_tuple->src.u.tcp.port);
1824				src_port = ntohs(orig_tuple->dst.u.tcp.port);
1825				src_port_nat = ntohs(reply_tuple->src.u.tcp.port);
1826				dest_port_nat = ntohs(reply_tuple->dst.u.tcp.port);
1827			} else {
1828				DEBUG_ASSERT(false, "Unhandled ecm_dir: %d\n", ecm_dir);
1829			}
1830		}
1831
1832		DEBUG_TRACE("TCP src: " ECM_IP_ADDR_DOT_FMT "(" ECM_IP_ADDR_DOT_FMT "):%d(%d), dest: " ECM_IP_ADDR_DOT_FMT "(" ECM_IP_ADDR_DOT_FMT "):%d(%d), dir %d\n",
1833				ECM_IP_ADDR_TO_DOT(ip_src_addr), ECM_IP_ADDR_TO_DOT(ip_src_addr_nat), src_port, src_port_nat, ECM_IP_ADDR_TO_DOT(ip_dest_addr),
1834				ECM_IP_ADDR_TO_DOT(ip_dest_addr_nat), dest_port, dest_port_nat, ecm_dir);
1835	} else if (protocol == IPPROTO_UDP) {
1836		/*
1837		 * Extract UDP header to obtain port information
1838		 */
1839		udp_hdr = ecm_tracker_udp_check_header_and_read(skb, iph, &udp_hdr_buff);
1840		if (unlikely(!udp_hdr)) {
1841			DEBUG_WARN("Invalid UDP header in skb %p\n", skb);
1842			return NF_ACCEPT;
1843		}
1844		//DNI need these port pkt directly for l2tp.
1845		if (ntohs(udp_hdr->dest) == 1701 || ntohs(udp_hdr->dest) == 137 || ntohs(udp_hdr->source) == 137)
1846			return NF_ACCEPT;
1847
1848
1849		layer4hdr = (__be16 *)udp_hdr;
1850
1851		/*
1852		 * Now extract information, if we have conntrack then use that (which would already be in the tuples)
1853		 */
1854		if (unlikely(!ct)) {
1855			orig_tuple->src.u.udp.port = udp_hdr->source;
1856			orig_tuple->dst.u.udp.port = udp_hdr->dest;
1857			reply_tuple->src.u.udp.port = udp_hdr->dest;
1858			reply_tuple->dst.u.udp.port = udp_hdr->source;
1859		}
1860
1861		/*
1862		 * Extract transport port information
1863		 * Refer to the ecm_nss_ipv4_process() for information on how we extract this information.
1864		 */
1865		if (sender == ECM_TRACKER_SENDER_TYPE_SRC) {
1866			if ((ecm_dir == ECM_DB_DIRECTION_EGRESS_NAT) || (ecm_dir == ECM_DB_DIRECTION_NON_NAT)) {
1867				src_port = ntohs(orig_tuple->src.u.udp.port);
1868				dest_port = ntohs(orig_tuple->dst.u.udp.port);
1869				dest_port_nat = ntohs(reply_tuple->src.u.udp.port);
1870				src_port_nat = ntohs(reply_tuple->dst.u.udp.port);
1871			} else if (ecm_dir == ECM_DB_DIRECTION_INGRESS_NAT) {
1872				src_port = ntohs(orig_tuple->src.u.udp.port);
1873				dest_port_nat = ntohs(orig_tuple->dst.u.udp.port);
1874				dest_port = ntohs(reply_tuple->src.u.udp.port);
1875				src_port_nat = ntohs(reply_tuple->dst.u.udp.port);
1876			} else if (ecm_dir == ECM_DB_DIRECTION_BRIDGED) {
1877				src_port = ntohs(orig_tuple->src.u.udp.port);
1878				dest_port = ntohs(orig_tuple->dst.u.udp.port);
1879				dest_port_nat = ntohs(reply_tuple->src.u.udp.port);
1880				src_port_nat = ntohs(reply_tuple->dst.u.udp.port);
1881			} else {
1882				DEBUG_ASSERT(false, "Unhandled ecm_dir: %d\n", ecm_dir);
1883			}
1884		} else {
1885			if ((ecm_dir == ECM_DB_DIRECTION_EGRESS_NAT) || (ecm_dir == ECM_DB_DIRECTION_NON_NAT)) {
1886				dest_port = ntohs(orig_tuple->src.u.udp.port);
1887				src_port = ntohs(orig_tuple->dst.u.udp.port);
1888				src_port_nat = ntohs(reply_tuple->src.u.udp.port);
1889				dest_port_nat = ntohs(reply_tuple->dst.u.udp.port);
1890			} else if (ecm_dir == ECM_DB_DIRECTION_INGRESS_NAT) {
1891				dest_port = ntohs(orig_tuple->src.u.udp.port);
1892				src_port_nat = ntohs(orig_tuple->dst.u.udp.port);
1893				src_port = ntohs(reply_tuple->src.u.udp.port);
1894				dest_port_nat = ntohs(reply_tuple->dst.u.udp.port);
1895			} else if (ecm_dir == ECM_DB_DIRECTION_BRIDGED) {
1896				dest_port = ntohs(orig_tuple->src.u.udp.port);
1897				src_port = ntohs(orig_tuple->dst.u.udp.port);
1898				src_port_nat = ntohs(reply_tuple->src.u.udp.port);
1899				dest_port_nat = ntohs(reply_tuple->dst.u.udp.port);
1900			} else {
1901				DEBUG_ASSERT(false, "Unhandled ecm_dir: %d\n", ecm_dir);
1902			}
1903		}
1904		DEBUG_TRACE("UDP src: " ECM_IP_ADDR_DOT_FMT ":%d, dest: " ECM_IP_ADDR_DOT_FMT ":%d, dir %d\n",
1905				ECM_IP_ADDR_TO_DOT(ip_src_addr), src_port, ECM_IP_ADDR_TO_DOT(ip_dest_addr), dest_port, ecm_dir);
1906	} else {
1907		DEBUG_WARN("Wrong protocol: %d\n", protocol);
1908		return NF_ACCEPT;
1909	}
1910
1911	/*
1912	 * Look up a connection
1913	 */
1914	ci = ecm_db_connection_find_and_ref(ip_src_addr, ip_dest_addr, protocol, src_port, dest_port);
1915
1916	/*
1917	 * If there is no existing connection then create a new one.
1918	 */
1919	if (unlikely(!ci)) {
1920		struct ecm_db_mapping_instance *src_mi;
1921		struct ecm_db_mapping_instance *dest_mi;
1922		struct ecm_db_mapping_instance *src_nat_mi;
1923		struct ecm_db_mapping_instance *dest_nat_mi;
1924		struct ecm_db_node_instance *src_ni;
1925		struct ecm_db_node_instance *dest_ni;
1926		struct ecm_db_node_instance *src_nat_ni;
1927		struct ecm_db_node_instance *dest_nat_ni;
1928		struct ecm_classifier_default_instance *dci;
1929		struct ecm_db_connection_instance *nci;
1930		ecm_classifier_type_t classifier_type;
1931		struct ecm_front_end_connection_instance *feci;
1932		int32_t to_list_first;
1933		struct ecm_db_iface_instance *to_list[ECM_DB_IFACE_HEIRARCHY_MAX];
1934		int32_t to_nat_list_first;
1935		struct ecm_db_iface_instance *to_nat_list[ECM_DB_IFACE_HEIRARCHY_MAX];
1936		int32_t from_list_first;
1937		struct ecm_db_iface_instance *from_list[ECM_DB_IFACE_HEIRARCHY_MAX];
1938		int32_t from_nat_list_first;
1939		struct ecm_db_iface_instance *from_nat_list[ECM_DB_IFACE_HEIRARCHY_MAX];
1940
1941		DEBUG_INFO("New ported connection from " ECM_IP_ADDR_DOT_FMT ":%u to " ECM_IP_ADDR_DOT_FMT ":%u protocol: %d\n",
1942				ECM_IP_ADDR_TO_DOT(ip_src_addr), src_port, ECM_IP_ADDR_TO_DOT(ip_dest_addr), dest_port, protocol);
1943
1944		/*
1945		 * Before we attempt to create the connection are we being terminated?
1946		 */
1947		spin_lock_bh(&ecm_nss_ipv4_lock);
1948		if (ecm_nss_ipv4_terminate_pending) {
1949			spin_unlock_bh(&ecm_nss_ipv4_lock);
1950			DEBUG_WARN("Terminating\n");
1951
1952			/*
1953			 * As we are terminating we just allow the packet to pass - it's no longer our concern
1954			 */
1955			return NF_ACCEPT;
1956		}
1957		spin_unlock_bh(&ecm_nss_ipv4_lock);
1958
1959		/*
1960		 * Does this connection have a conntrack entry?
1961		 */
1962		if (ct) {
1963			unsigned int conn_count;
1964
1965			/*
1966			 * If we have exceeded the connection limit (according to conntrack) then abort
1967			 * NOTE: Conntrack, when at its limit, will destroy a connection to make way for a new.
1968			 * Conntrack won't exceed its limit but ECM can due to it needing to hold connections while
1969			 * acceleration commands are in-flight.
1970			 * This means that ECM can 'fall behind' somewhat with the connection state wrt conntrack connection state.
1971			 * This is not seen as an issue since conntrack will have issued us with a destroy event for the flushed connection(s)
1972			 * and we will eventually catch up.
1973			 * Since ECM is capable of handling connections mid-flow ECM will pick up where it can.
1974			 */
1975			conn_count = (unsigned int)ecm_db_connection_count_get();
1976			if (conn_count >= nf_conntrack_max) {
1977				DEBUG_WARN("ECM Connection count limit reached: db: %u, ct: %u\n", conn_count, nf_conntrack_max);
1978				return NF_ACCEPT;
1979			}
1980
1981			if (protocol == IPPROTO_TCP) {
1982				/*
1983				 * No point in establishing a connection for one that is closing
1984				 */
1985				spin_lock_bh(&ct->lock);
1986				if (ct->proto.tcp.state >= TCP_CONNTRACK_FIN_WAIT && ct->proto.tcp.state <= TCP_CONNTRACK_CLOSE) {
1987					spin_unlock_bh(&ct->lock);
1988					DEBUG_TRACE("%p: Connection in termination state %#X\n", ct, ct->proto.tcp.state);
1989					return NF_ACCEPT;
1990				}
1991				spin_unlock_bh(&ct->lock);
1992			}
1993		}
1994
1995		/*
1996		 * Now allocate the new connection
1997		 */
1998		nci = ecm_db_connection_alloc();
1999		if (!nci) {
2000			DEBUG_WARN("Failed to allocate connection\n");
2001			return NF_ACCEPT;
2002		}
2003
2004		/*
2005		 * Connection must have a front end instance associated with it
2006		 */
2007		feci = (struct ecm_front_end_connection_instance *)ecm_nss_ported_ipv4_connection_instance_alloc(nci, protocol, can_accel);
2008		if (!feci) {
2009			ecm_db_connection_deref(nci);
2010			DEBUG_WARN("Failed to allocate front end\n");
2011			return NF_ACCEPT;
2012		}
2013
2014		/*
2015		 * Get the src and destination mappings.
2016		 * For this we also need the interface lists which we also set upon the new connection while we are at it.
2017		 * GGG TODO rework terms of "src/dest" - these need to be named consistently as from/to as per database terms.
2018		 * GGG TODO The empty list checks should not be needed, mapping_establish_and_ref() should fail out if there is no list anyway.
2019		 */
2020		DEBUG_TRACE("%p: Create the 'from' interface heirarchy list\n", nci);
2021		from_list_first = ecm_interface_heirarchy_construct(feci, from_list, ip_dest_addr, ip_src_addr, 4, protocol, in_dev, is_routed, in_dev, src_node_addr, dest_node_addr, layer4hdr);
2022		if (from_list_first == ECM_DB_IFACE_HEIRARCHY_MAX) {
2023			feci->deref(feci);
2024			ecm_db_connection_deref(nci);
2025			DEBUG_WARN("Failed to obtain 'from' heirarchy list\n");
2026			return NF_ACCEPT;
2027		}
2028		ecm_db_connection_from_interfaces_reset(nci, from_list, from_list_first);
2029
2030		DEBUG_TRACE("%p: Create source node\n", nci);
2031		src_ni = ecm_nss_ipv4_node_establish_and_ref(feci, in_dev, ip_src_addr, from_list, from_list_first, src_node_addr);
2032		ecm_db_connection_interfaces_deref(from_list, from_list_first);
2033		if (!src_ni) {
2034			feci->deref(feci);
2035			ecm_db_connection_deref(nci);
2036			DEBUG_WARN("Failed to establish source node\n");
2037			return NF_ACCEPT;
2038		}
2039
2040		DEBUG_TRACE("%p: Create source mapping\n", nci);
2041		src_mi = ecm_nss_ipv4_mapping_establish_and_ref(ip_src_addr, src_port);
2042		if (!src_mi) {
2043			ecm_db_node_deref(src_ni);
2044			feci->deref(feci);
2045			ecm_db_connection_deref(nci);
2046			DEBUG_WARN("Failed to establish src mapping\n");
2047			return NF_ACCEPT;
2048		}
2049
2050		DEBUG_TRACE("%p: Create the 'to' interface heirarchy list\n", nci);
2051		to_list_first = ecm_interface_heirarchy_construct(feci, to_list, ip_src_addr, ip_dest_addr, 4, protocol, out_dev, is_routed, in_dev, dest_node_addr, src_node_addr, layer4hdr);
2052		if (to_list_first == ECM_DB_IFACE_HEIRARCHY_MAX) {
2053			ecm_db_mapping_deref(src_mi);
2054			ecm_db_node_deref(src_ni);
2055			feci->deref(feci);
2056			ecm_db_connection_deref(nci);
2057			DEBUG_WARN("Failed to obtain 'to' heirarchy list\n");
2058			return NF_ACCEPT;
2059		}
2060		ecm_db_connection_to_interfaces_reset(nci, to_list, to_list_first);
2061
2062		DEBUG_TRACE("%p: Create dest node\n", nci);
2063		dest_ni = ecm_nss_ipv4_node_establish_and_ref(feci, out_dev, ip_dest_addr, to_list, to_list_first, dest_node_addr);
2064		ecm_db_connection_interfaces_deref(to_list, to_list_first);
2065		if (!dest_ni) {
2066			ecm_db_mapping_deref(src_mi);
2067			ecm_db_node_deref(src_ni);
2068			feci->deref(feci);
2069			ecm_db_connection_deref(nci);
2070			DEBUG_WARN("Failed to establish dest node\n");
2071			return NF_ACCEPT;
2072		}
2073
2074		DEBUG_TRACE("%p: Create dest mapping\n", nci);
2075		dest_mi = ecm_nss_ipv4_mapping_establish_and_ref(ip_dest_addr, dest_port);
2076		if (!dest_mi) {
2077			ecm_db_node_deref(dest_ni);
2078			ecm_db_mapping_deref(src_mi);
2079			ecm_db_node_deref(src_ni);
2080			feci->deref(feci);
2081			ecm_db_connection_deref(nci);
2082			DEBUG_WARN("Failed to establish dest mapping\n");
2083			return NF_ACCEPT;
2084		}
2085
2086		/*
2087		 * Get the src and destination NAT mappings
2088		 * For this we also need the interface lists which we also set upon the new connection while we are at it.
2089		 * GGG TODO rework terms of "src/dest" - these need to be named consistently as from/to as per database terms.
2090		 * GGG TODO The empty list checks should not be needed, mapping_establish_and_ref() should fail out if there is no list anyway.
2091		 */
2092		DEBUG_TRACE("%p: Create the 'from NAT' interface heirarchy list\n", nci);
2093		from_nat_list_first = ecm_interface_heirarchy_construct(feci, from_nat_list, ip_dest_addr, ip_src_addr_nat, 4, protocol, in_dev_nat, is_routed, in_dev_nat, src_node_addr_nat, dest_node_addr_nat, layer4hdr);
2094		if (from_nat_list_first == ECM_DB_IFACE_HEIRARCHY_MAX) {
2095			ecm_db_mapping_deref(dest_mi);
2096			ecm_db_node_deref(dest_ni);
2097			ecm_db_mapping_deref(src_mi);
2098			ecm_db_node_deref(src_ni);
2099			feci->deref(feci);
2100			ecm_db_connection_deref(nci);
2101			DEBUG_WARN("Failed to obtain 'from NAT' heirarchy list\n");
2102			return NF_ACCEPT;
2103		}
2104		ecm_db_connection_from_nat_interfaces_reset(nci, from_nat_list, from_nat_list_first);
2105
2106		DEBUG_TRACE("%p: Create source nat node\n", nci);
2107		src_nat_ni = ecm_nss_ipv4_node_establish_and_ref(feci, in_dev_nat, ip_src_addr_nat, from_nat_list, from_nat_list_first, src_node_addr_nat);
2108		ecm_db_connection_interfaces_deref(from_nat_list, from_nat_list_first);
2109		if (!src_nat_ni) {
2110			ecm_db_mapping_deref(dest_mi);
2111			ecm_db_node_deref(dest_ni);
2112			ecm_db_mapping_deref(src_mi);
2113			ecm_db_node_deref(src_ni);
2114			feci->deref(feci);
2115			ecm_db_connection_deref(nci);
2116			DEBUG_WARN("Failed to establish source nat node\n");
2117			return NF_ACCEPT;
2118		}
2119
2120		src_nat_mi = ecm_nss_ipv4_mapping_establish_and_ref(ip_src_addr_nat, src_port_nat);
2121		if (!src_nat_mi) {
2122			ecm_db_node_deref(src_nat_ni);
2123			ecm_db_mapping_deref(dest_mi);
2124			ecm_db_node_deref(dest_ni);
2125			ecm_db_mapping_deref(src_mi);
2126			ecm_db_node_deref(src_ni);
2127			feci->deref(feci);
2128			ecm_db_connection_deref(nci);
2129			DEBUG_WARN("Failed to establish src nat mapping\n");
2130			return NF_ACCEPT;
2131		}
2132
2133		DEBUG_TRACE("%p: Create the 'to NAT' interface heirarchy list\n", nci);
2134		to_nat_list_first = ecm_interface_heirarchy_construct(feci, to_nat_list, ip_src_addr, ip_dest_addr_nat, 4, protocol, out_dev_nat, is_routed, in_dev, dest_node_addr_nat, src_node_addr_nat, layer4hdr);
2135		if (to_nat_list_first == ECM_DB_IFACE_HEIRARCHY_MAX) {
2136			ecm_db_mapping_deref(src_nat_mi);
2137			ecm_db_node_deref(src_nat_ni);
2138			ecm_db_mapping_deref(dest_mi);
2139			ecm_db_node_deref(dest_ni);
2140			ecm_db_mapping_deref(src_mi);
2141			ecm_db_node_deref(src_ni);
2142			feci->deref(feci);
2143			ecm_db_connection_deref(nci);
2144			DEBUG_WARN("Failed to obtain 'to NAT' heirarchy list\n");
2145			return NF_ACCEPT;
2146		}
2147		ecm_db_connection_to_nat_interfaces_reset(nci, to_nat_list, to_nat_list_first);
2148
2149		DEBUG_TRACE("%p: Create dest nat node\n", nci);
2150		dest_nat_ni = ecm_nss_ipv4_node_establish_and_ref(feci, out_dev_nat, ip_dest_addr_nat, to_nat_list, to_nat_list_first, dest_node_addr_nat);
2151		ecm_db_connection_interfaces_deref(to_nat_list, to_nat_list_first);
2152		if (!dest_nat_ni) {
2153			ecm_db_mapping_deref(src_nat_mi);
2154			ecm_db_node_deref(src_nat_ni);
2155			ecm_db_mapping_deref(dest_mi);
2156			ecm_db_node_deref(dest_ni);
2157			ecm_db_mapping_deref(src_mi);
2158			ecm_db_node_deref(src_ni);
2159			feci->deref(feci);
2160			ecm_db_connection_deref(nci);
2161			DEBUG_WARN("Failed to establish dest nat node\n");
2162			return NF_ACCEPT;
2163		}
2164
2165		dest_nat_mi = ecm_nss_ipv4_mapping_establish_and_ref(ip_dest_addr_nat, dest_port_nat);
2166		if (!dest_nat_mi) {
2167			ecm_db_node_deref(dest_nat_ni);
2168			ecm_db_mapping_deref(src_nat_mi);
2169			ecm_db_node_deref(src_nat_ni);
2170			ecm_db_mapping_deref(dest_mi);
2171			ecm_db_node_deref(dest_ni);
2172			ecm_db_mapping_deref(src_mi);
2173			ecm_db_node_deref(src_ni);
2174			feci->deref(feci);
2175			ecm_db_connection_deref(nci);
2176			DEBUG_WARN("Failed to establish dest mapping\n");
2177			return NF_ACCEPT;
2178		}
2179
2180		/*
2181		 * Every connection also needs a default classifier which is considered 'special' to be assigned
2182		 */
2183		dci = ecm_classifier_default_instance_alloc(nci, protocol, ecm_dir, src_port, dest_port);
2184		if (!dci) {
2185			ecm_db_mapping_deref(dest_nat_mi);
2186			ecm_db_node_deref(dest_nat_ni);
2187			ecm_db_mapping_deref(src_nat_mi);
2188			ecm_db_node_deref(src_nat_ni);
2189			ecm_db_mapping_deref(dest_mi);
2190			ecm_db_node_deref(dest_ni);
2191			ecm_db_mapping_deref(src_mi);
2192			ecm_db_node_deref(src_ni);
2193			feci->deref(feci);
2194			ecm_db_connection_deref(nci);
2195			DEBUG_WARN("Failed to allocate default classifier\n");
2196			return NF_ACCEPT;
2197		}
2198		ecm_db_connection_classifier_assign(nci, (struct ecm_classifier_instance *)dci);
2199
2200		/*
2201		 * Every connection starts with a full complement of classifiers assigned.
2202		 * NOTE: Default classifier is a special case considered previously
2203		 */
2204		for (classifier_type = ECM_CLASSIFIER_TYPE_DEFAULT + 1; classifier_type < ECM_CLASSIFIER_TYPES; ++classifier_type) {
2205			struct ecm_classifier_instance *aci = ecm_nss_ipv4_assign_classifier(nci, classifier_type);
2206			if (aci) {
2207				aci->deref(aci);
2208			} else {
2209				dci->base.deref((struct ecm_classifier_instance *)dci);
2210				ecm_db_mapping_deref(dest_nat_mi);
2211				ecm_db_node_deref(dest_nat_ni);
2212				ecm_db_mapping_deref(src_nat_mi);
2213				ecm_db_node_deref(src_nat_ni);
2214				ecm_db_mapping_deref(dest_mi);
2215				ecm_db_node_deref(dest_ni);
2216				ecm_db_mapping_deref(src_mi);
2217				ecm_db_node_deref(src_ni);
2218				feci->deref(feci);
2219				ecm_db_connection_deref(nci);
2220				DEBUG_WARN("Failed to allocate classifiers assignments\n");
2221				return NF_ACCEPT;
2222			}
2223		}
2224
2225		/*
2226		 * Now add the connection into the database.
2227		 * NOTE: In an SMP situation such as ours there is a possibility that more than one packet for the same
2228		 * connection is being processed simultaneously.
2229		 * We *could* end up creating more than one connection instance for the same actual connection.
2230		 * To guard against this we now perform a mutex'd lookup of the connection + add once more - another cpu may have created it before us.
2231		 */
2232		spin_lock_bh(&ecm_nss_ipv4_lock);
2233		ci = ecm_db_connection_find_and_ref(ip_src_addr, ip_dest_addr, protocol, src_port, dest_port);
2234		if (ci) {
2235			/*
2236			 * Another cpu created the same connection before us - use the one we just found
2237			 */
2238			spin_unlock_bh(&ecm_nss_ipv4_lock);
2239			ecm_db_connection_deref(nci);
2240		} else {
2241			ecm_db_timer_group_t tg;
2242			ecm_tracker_sender_state_t src_state;
2243			ecm_tracker_sender_state_t dest_state;
2244			ecm_tracker_connection_state_t state;
2245			struct ecm_tracker_instance *ti;
2246
2247			/*
2248			 * Ask tracker for timer group to set the connection to initially.
2249			 */
2250			ti = dci->tracker_get_and_ref(dci);
2251			ti->state_get(ti, &src_state, &dest_state, &state, &tg);
2252			ti->deref(ti);
2253
2254			/*
2255			 * Add the new connection we created into the database
2256			 * NOTE: assign to a short timer group for now - it is the assigned classifiers responsibility to do this
2257			 */
2258			ecm_db_connection_add(nci, feci, src_mi, dest_mi, src_nat_mi, dest_nat_mi,
2259					src_ni, dest_ni, src_nat_ni, dest_nat_ni,
2260					4, protocol, ecm_dir,
2261					NULL /* final callback */,
2262					ecm_nss_ported_ipv4_connection_defunct_callback,
2263					tg, is_routed, nci);
2264
2265			spin_unlock_bh(&ecm_nss_ipv4_lock);
2266
2267			ci = nci;
2268			DEBUG_INFO("%p: New ported connection created\n", ci);
2269		}
2270
2271		/*
2272		 * No longer need referenecs to the objects we created
2273		 */
2274		dci->base.deref((struct ecm_classifier_instance *)dci);
2275		ecm_db_mapping_deref(dest_nat_mi);
2276		ecm_db_node_deref(dest_nat_ni);
2277		ecm_db_mapping_deref(src_nat_mi);
2278		ecm_db_node_deref(src_nat_ni);
2279		ecm_db_mapping_deref(dest_mi);
2280		ecm_db_node_deref(dest_ni);
2281		ecm_db_mapping_deref(src_mi);
2282		ecm_db_node_deref(src_ni);
2283		feci->deref(feci);
2284	}
2285
2286	/*
2287	 * Identify which side of the connection is sending.
2288	 * NOTE: This may be different than what sender is at the moment
2289	 * given the connection we have located.
2290	 */
2291	ecm_db_connection_from_address_get(ci, match_addr);
2292	if (ECM_IP_ADDR_MATCH(ip_src_addr, match_addr)) {
2293		sender = ECM_TRACKER_SENDER_TYPE_SRC;
2294	} else {
2295		sender = ECM_TRACKER_SENDER_TYPE_DEST;
2296	}
2297
2298	/*
2299	 * In nat reflection scenarios SNAT rule is getting applied on the packet after packet
2300	 * passed through bridge post routing hook
2301	 *
2302	 * Example
2303	 * Consider following scenario where both WLAN PC and eth1 are part of same bridge
2304	 * 192.168.1.3(WLAN PC)<-->192.168.1.1(DUT br-lan)---> 192.168.1.4(Eth1 PC)
2305	 * When a DNAT is applied it is observed that following NAT rules are appended in iptables
2306	 *
2307	 * -A nat_reflection_out -s 192.168.1.0/24 -d 192.168.1.4/32 -p tcp -m tcp --dport 3389 -m comment --comment "wan" -j SNAT
2308	 * 		 --to-source 192.168.1.1
2309	 * -A nat_reflection_out -s 192.168.1.0/24 -d 192.168.1.4/32 -p udp -m udp --dport 3389 -m comment --comment "wan" -j SNAT
2310	 * 		 --to-source 192.168.1.1
2311	 *
2312	 * This Shows that SNAT is getting applied on bridged packet also. However it is observed that
2313	 * the SNAT is updated in ct after the packet has crossed this function through bridge hook.
2314	 *
2315	 * Hence Flushing the connection that was already created earlier if the ip_src_addr_nat value changes for same tuple in
2316	 * subsequent packets
2317	 */
2318	ecm_db_connection_from_address_nat_get(ci, match_addr);
2319	if (!ECM_IP_ADDR_MATCH(ip_src_addr_nat, match_addr) && ct && (sender == ECM_TRACKER_SENDER_TYPE_SRC)) {
2320		/*
2321		 * Force destruction of the connection my making it defunct
2322		 */
2323		ecm_db_connection_make_defunct(ci);
2324		ecm_db_connection_deref(ci);
2325		return NF_ACCEPT;
2326	}
2327
2328	/*
2329	 * Keep connection alive as we have seen activity
2330	 */
2331	if (!ecm_db_connection_defunct_timer_touch(ci)) {
2332		ecm_db_connection_deref(ci);
2333		return NF_ACCEPT;
2334	}
2335
2336	/*
2337	 * Do we need to action generation change?
2338	 */
2339	if (unlikely(ecm_db_connection_regeneration_required_check(ci))) {
2340		ecm_nss_ipv4_connection_regenerate(ci, sender, out_dev, out_dev_nat, in_dev, in_dev_nat, layer4hdr);
2341	}
2342
2343	/*
2344	 * Iterate the assignments and call to process!
2345	 * Policy implemented:
2346	 * 1. Classifiers that say they are not relevant are unassigned and not actioned further.
2347	 * 2. Any drop command from any classifier is honoured.
2348	 * 3. All classifiers must action acceleration for accel to be honoured, any classifiers not sure of their relevance will stop acceleration.
2349	 * 4. Only the highest priority classifier, that actions it, will have its qos tag honoured.
2350	 * 5. Only the highest priority classifier, that actions it, will have its timer group honoured.
2351	 */
2352	DEBUG_TRACE("%p: process begin, skb: %p\n", ci, skb);
2353	prevalent_pr.process_actions = 0;
2354	prevalent_pr.drop = false;
2355	prevalent_pr.flow_qos_tag = skb->priority;
2356	prevalent_pr.return_qos_tag = skb->priority;
2357	prevalent_pr.accel_mode = ECM_CLASSIFIER_ACCELERATION_MODE_ACCEL;
2358	prevalent_pr.timer_group = ci_orig_timer_group = ecm_db_connection_timer_group_get(ci);
2359
2360	assignment_count = ecm_db_connection_classifier_assignments_get_and_ref(ci, assignments);
2361	for (aci_index = 0; aci_index < assignment_count; ++aci_index) {
2362		struct ecm_classifier_process_response aci_pr;
2363		struct ecm_classifier_instance *aci;
2364
2365		aci = assignments[aci_index];
2366		DEBUG_TRACE("%p: process: %p, type: %d\n", ci, aci, aci->type_get(aci));
2367		aci->process(aci, sender, iph, skb, &aci_pr);
2368		DEBUG_TRACE("%p: aci_pr: process actions: %x, became relevant: %u, relevance: %d, drop: %d, "
2369				"flow_qos_tag: %u, return_qos_tag: %u, accel_mode: %x, timer_group: %d\n",
2370				ci, aci_pr.process_actions, aci_pr.became_relevant, aci_pr.relevance, aci_pr.drop,
2371				aci_pr.flow_qos_tag, aci_pr.return_qos_tag, aci_pr.accel_mode, aci_pr.timer_group);
2372
2373		if (aci_pr.relevance == ECM_CLASSIFIER_RELEVANCE_NO) {
2374			ecm_classifier_type_t aci_type;
2375
2376			/*
2377			 * This classifier can be unassigned - PROVIDED it is not the default classifier
2378			 */
2379			aci_type = aci->type_get(aci);
2380			if (aci_type == ECM_CLASSIFIER_TYPE_DEFAULT) {
2381				continue;
2382			}
2383
2384			DEBUG_INFO("%p: Classifier not relevant, unassign: %d", ci, aci_type);
2385			ecm_db_connection_classifier_unassign(ci, aci);
2386			continue;
2387		}
2388
2389		/*
2390		 * Yes or Maybe relevant.
2391		 */
2392		if (aci_pr.process_actions & ECM_CLASSIFIER_PROCESS_ACTION_DROP) {
2393			/*
2394			 * Drop command from any classifier is actioned.
2395			 */
2396			DEBUG_TRACE("%p: wants drop: %p, type: %d, skb: %p\n", ci, aci, aci->type_get(aci), skb);
2397			prevalent_pr.drop |= aci_pr.drop;
2398		}
2399
2400		/*
2401		 * Accel mode permission
2402		 */
2403		if (aci_pr.relevance == ECM_CLASSIFIER_RELEVANCE_MAYBE) {
2404			/*
2405			 * Classifier not sure of its relevance - cannot accel yet
2406			 */
2407			DEBUG_TRACE("%p: accel denied by maybe: %p, type: %d\n", ci, aci, aci->type_get(aci));
2408			prevalent_pr.accel_mode = ECM_CLASSIFIER_ACCELERATION_MODE_NO;
2409		} else {
2410			if (aci_pr.process_actions & ECM_CLASSIFIER_PROCESS_ACTION_ACCEL_MODE) {
2411				if (aci_pr.accel_mode == ECM_CLASSIFIER_ACCELERATION_MODE_NO) {
2412					DEBUG_TRACE("%p: accel denied: %p, type: %d\n", ci, aci, aci->type_get(aci));
2413					prevalent_pr.accel_mode = ECM_CLASSIFIER_ACCELERATION_MODE_NO;
2414				}
2415				/* else yes or don't care about accel */
2416			}
2417		}
2418
2419		/*
2420		 * Timer group (the last classifier i.e. the highest priority one) will 'win'
2421		 */
2422		if (aci_pr.process_actions & ECM_CLASSIFIER_PROCESS_ACTION_TIMER_GROUP) {
2423			DEBUG_TRACE("%p: timer group: %p, type: %d, group: %d\n", ci, aci, aci->type_get(aci), aci_pr.timer_group);
2424			prevalent_pr.timer_group = aci_pr.timer_group;
2425		}
2426
2427		/*
2428		 * Qos tag (the last classifier i.e. the highest priority one) will 'win'
2429		 */
2430		if (aci_pr.process_actions & ECM_CLASSIFIER_PROCESS_ACTION_QOS_TAG) {
2431			DEBUG_TRACE("%p: aci: %p, type: %d, flow qos tag: %u, return qos tag: %u\n",
2432					ci, aci, aci->type_get(aci), aci_pr.flow_qos_tag, aci_pr.return_qos_tag);
2433			prevalent_pr.flow_qos_tag = aci_pr.flow_qos_tag;
2434			prevalent_pr.return_qos_tag = aci_pr.return_qos_tag;
2435		}
2436
2437#ifdef ECM_CLASSIFIER_DSCP_ENABLE
2438		/*
2439		 * If any classifier denied DSCP remarking then that overrides every classifier
2440		 */
2441		if (aci_pr.process_actions & ECM_CLASSIFIER_PROCESS_ACTION_DSCP_DENY) {
2442			DEBUG_TRACE("%p: aci: %p, type: %d, DSCP remark denied\n",
2443					ci, aci, aci->type_get(aci));
2444			prevalent_pr.process_actions |= ECM_CLASSIFIER_PROCESS_ACTION_DSCP_DENY;
2445			prevalent_pr.process_actions &= ~ECM_CLASSIFIER_PROCESS_ACTION_DSCP;
2446		}
2447
2448		/*
2449		 * DSCP remark action, but only if it has not been denied by any classifier
2450		 */
2451		if (aci_pr.process_actions & ECM_CLASSIFIER_PROCESS_ACTION_DSCP) {
2452			if (!(prevalent_pr.process_actions & ECM_CLASSIFIER_PROCESS_ACTION_DSCP_DENY)) {
2453				DEBUG_TRACE("%p: aci: %p, type: %d, DSCP remark wanted, flow_dscp: %u, return dscp: %u\n",
2454						ci, aci, aci->type_get(aci), aci_pr.flow_dscp, aci_pr.return_dscp);
2455				prevalent_pr.process_actions |= ECM_CLASSIFIER_PROCESS_ACTION_DSCP;
2456				prevalent_pr.flow_dscp = aci_pr.flow_dscp;
2457				prevalent_pr.return_dscp = aci_pr.return_dscp;
2458			}
2459		}
2460#endif
2461	}
2462	ecm_db_connection_assignments_release(assignment_count, assignments);
2463
2464	/*
2465	 * Change timer group?
2466	 */
2467	if (ci_orig_timer_group != prevalent_pr.timer_group) {
2468		DEBUG_TRACE("%p: change timer group from: %d to: %d\n", ci, ci_orig_timer_group, prevalent_pr.timer_group);
2469		ecm_db_connection_defunct_timer_reset(ci, prevalent_pr.timer_group);
2470	}
2471
2472	/*
2473	 * Drop?
2474	 */
2475	if (prevalent_pr.drop) {
2476		DEBUG_TRACE("%p: drop: %p\n", ci, skb);
2477		ecm_db_connection_data_totals_update_dropped(ci, (sender == ECM_TRACKER_SENDER_TYPE_SRC)? true : false, skb->len, 1);
2478		ecm_db_connection_deref(ci);
2479		return NF_ACCEPT;
2480	}
2481	ecm_db_connection_data_totals_update(ci, (sender == ECM_TRACKER_SENDER_TYPE_SRC)? true : false, skb->len, 1);
2482
2483	/*
2484	 * Assign qos tag
2485	 * GGG TODO Should we use sender to identify whether to use flow or return qos tag?
2486	 */
2487	skb->priority = prevalent_pr.flow_qos_tag;
2488	DEBUG_TRACE("%p: skb priority: %u\n", ci, skb->priority);
2489
2490	/*
2491	 * Accelerate?
2492	 */
2493	if (prevalent_pr.accel_mode == ECM_CLASSIFIER_ACCELERATION_MODE_ACCEL) {
2494		struct ecm_front_end_connection_instance *feci;
2495		DEBUG_TRACE("%p: accel\n", ci);
2496		feci = ecm_db_connection_front_end_get_and_ref(ci);
2497		ecm_nss_ported_ipv4_connection_accelerate(feci, &prevalent_pr, is_l2_encap, ct);
2498		feci->deref(feci);
2499	}
2500	ecm_db_connection_deref(ci);
2501
2502	return NF_ACCEPT;
2503}
2504
2505/*
2506 * ecm_nss_ported_ipv4_debugfs_init()
2507 */
2508bool ecm_nss_ported_ipv4_debugfs_init(struct dentry *dentry)
2509{
2510	struct dentry *udp_dentry;
2511
2512	udp_dentry = debugfs_create_u32("udp_accelerated_count", S_IRUGO, dentry,
2513						&ecm_nss_ported_ipv4_accelerated_count[ECM_NSS_PORTED_IPV4_PROTO_UDP]);
2514	if (!udp_dentry) {
2515		DEBUG_ERROR("Failed to create ecm nss ipv4 udp_accelerated_count file in debugfs\n");
2516		return false;
2517	}
2518
2519	if (!debugfs_create_u32("tcp_accelerated_count", S_IRUGO, dentry,
2520					&ecm_nss_ported_ipv4_accelerated_count[ECM_NSS_PORTED_IPV4_PROTO_TCP])) {
2521		DEBUG_ERROR("Failed to create ecm nss ipv4 tcp_accelerated_count file in debugfs\n");
2522		debugfs_remove(udp_dentry);
2523		return false;
2524	}
2525
2526	return true;
2527}
2528
2529