1/*
2 **************************************************************************
3 * Copyright (c) 2014-2015 The Linux Foundation.  All rights reserved.
4 * Permission to use, copy, modify, and/or distribute this software for
5 * any purpose with or without fee is hereby granted, provided that the
6 * above copyright notice and this permission notice appear in all copies.
7 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
8 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
9 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
10 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
11 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
12 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
13 * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
14 **************************************************************************
15 */
16
17#include <linux/version.h>
18#include <linux/types.h>
19#include <linux/ip.h>
20#include <linux/tcp.h>
21#include <linux/module.h>
22#include <linux/skbuff.h>
23#include <linux/icmp.h>
24#include <linux/debugfs.h>
25#include <linux/kthread.h>
26#include <linux/pkt_sched.h>
27#include <linux/string.h>
28#include <net/ip6_route.h>
29#include <net/ip6_fib.h>
30#include <net/addrconf.h>
31#include <net/ipv6.h>
32#include <net/tcp.h>
33#include <asm/unaligned.h>
34#include <asm/uaccess.h>	/* for put_user */
36#include <linux/inet.h>
37#include <linux/in6.h>
38#include <linux/udp.h>
40#include <linux/inetdevice.h>
41#include <linux/if_arp.h>
42#include <linux/netfilter_ipv6.h>
43#include <linux/netfilter_bridge.h>
44#include <linux/if_bridge.h>
45#include <net/arp.h>
46#include <net/netfilter/nf_conntrack.h>
47#include <net/netfilter/nf_conntrack_acct.h>
48#include <net/netfilter/nf_conntrack_helper.h>
49#include <net/netfilter/nf_conntrack_l4proto.h>
50#include <net/netfilter/nf_conntrack_l3proto.h>
51#include <net/netfilter/nf_conntrack_zones.h>
52#include <net/netfilter/nf_conntrack_core.h>
53#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
54#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
55#ifdef ECM_INTERFACE_VLAN_ENABLE
56#include <linux/../../net/8021q/vlan.h>
57#include <linux/if_vlan.h>
58#endif
59
60/*
61 * Debug output levels
62 * 0 = OFF
63 * 1 = ASSERTS / ERRORS
64 * 2 = 1 + WARN
65 * 3 = 2 + INFO
66 * 4 = 3 + TRACE
67 */
68#define DEBUG_LEVEL ECM_NSS_NON_PORTED_IPV6_DEBUG_LEVEL
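
/*
 * A minimal sketch of how this level is normally chosen at build time, on the
 * assumption that ECM_NSS_NON_PORTED_IPV6_DEBUG_LEVEL is supplied by the module
 * Makefile (the exact flag below is illustrative, not taken from this file):
 *
 *	ccflags-y += -DECM_NSS_NON_PORTED_IPV6_DEBUG_LEVEL=2
 *
 * With a value of 2, only assert/error and warning output from the table above
 * remains enabled; DEBUG_INFO()/DEBUG_TRACE() calls in this file produce nothing.
 */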
69
70#include <nss_api_if.h>
71
72#include "ecm_types.h"
73#include "ecm_db_types.h"
74#include "ecm_state.h"
75#include "ecm_tracker.h"
76#include "ecm_classifier.h"
77#include "ecm_front_end_types.h"
78#include "ecm_tracker_datagram.h"
79#include "ecm_tracker_udp.h"
80#include "ecm_tracker_tcp.h"
81#include "ecm_db.h"
82#include "ecm_classifier_default.h"
83#include "ecm_interface.h"
84#include "ecm_nss_non_ported_ipv6.h"
85#include "ecm_nss_ipv6.h"
86#include "ecm_nss_common.h"
87
88/*
89 * Magic numbers
90 */
91#define ECM_NSS_NON_PORTED_IPV6_CONNECTION_INSTANCE_MAGIC 0xEC34
92
93/*
94 * struct ecm_nss_non_ported_ipv6_connection_instance
95 *	A connection specific front end instance for Non-Ported connections
96 */
97struct ecm_nss_non_ported_ipv6_connection_instance {
98	struct ecm_front_end_connection_instance base;		/* Base class */
99#if (DEBUG_LEVEL > 0)
100	uint16_t magic;
101#endif
102};
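
/*
 * The base instance is deliberately the first member of the structure above, so
 * the generic front end pointer and this non-ported instance alias the same
 * object.  This is the cast pattern used throughout this file (sketch only,
 * mirroring the handlers below):
 *
 *	struct ecm_front_end_connection_instance *feci = ...;
 *	struct ecm_nss_non_ported_ipv6_connection_instance *nnpci =
 *			(struct ecm_nss_non_ported_ipv6_connection_instance *)feci;
 *	DEBUG_CHECK_MAGIC(nnpci, ECM_NSS_NON_PORTED_IPV6_CONNECTION_INSTANCE_MAGIC, "%p: magic failed", nnpci);
 *
 * The magic field only exists when DEBUG_LEVEL > 0, so the DEBUG_SET_MAGIC /
 * DEBUG_CHECK_MAGIC macros are presumably no-ops on non-debug builds.
 */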
103
104static int ecm_nss_non_ported_ipv6_accelerated_count = 0;		/* Number of Non-Ported connections currently offloaded */
105
106/*
107 * ecm_nss_non_ported_ipv6_connection_callback()
108 *	Callback for handling create ack/nack calls.
109 */
110static void ecm_nss_non_ported_ipv6_connection_callback(void *app_data, struct nss_ipv6_msg *nim)
111{
112	struct nss_ipv6_rule_create_msg *nircm = &nim->msg.rule_create;
113	uint32_t serial = (uint32_t)app_data;
114	struct ecm_db_connection_instance *ci;
115	struct ecm_front_end_connection_instance *feci;
116	struct ecm_nss_non_ported_ipv6_connection_instance *nnpci;
117	ip_addr_t flow_ip;
118	ip_addr_t return_ip;
119	ecm_front_end_acceleration_mode_t result_mode;
120
121	/*
122	 * Is this a response to a create message?
123	 */
124	if (nim->cm.type != NSS_IPV6_TX_CREATE_RULE_MSG) {
125		DEBUG_ERROR("%p: non_ported create callback with improper type: %d, serial: %u\n", nim, nim->cm.type, serial);
126		return;
127	}
128
129	/*
130	 * Look up ecm connection so that we can update the status.
131	 */
132	ci = ecm_db_connection_serial_find_and_ref(serial);
133	if (!ci) {
134		DEBUG_TRACE("%p: create callback, connection not found, serial: %u\n", nim, serial);
135		return;
136	}
137
138	/*
139	 * Release the ref that was held for this ack/nack response.
140	 * NOTE: It's okay to do this here; ci won't go away because we still hold the
141	 * ref taken by ecm_db_connection_serial_find_and_ref() above.
142	 */
143	ecm_db_connection_deref(ci);
144
145	/*
146	 * Get the front end instance
147	 */
148	feci = ecm_db_connection_front_end_get_and_ref(ci);
149	nnpci = (struct ecm_nss_non_ported_ipv6_connection_instance *)feci;
150	DEBUG_CHECK_MAGIC(nnpci, ECM_NSS_NON_PORTED_IPV6_CONNECTION_INSTANCE_MAGIC, "%p: magic failed", nnpci);
151
152	ECM_NSS_IPV6_ADDR_TO_IP_ADDR(flow_ip, nircm->tuple.flow_ip);
153	ECM_NSS_IPV6_ADDR_TO_IP_ADDR(return_ip, nircm->tuple.return_ip);
154
155	/*
156	 * Record command duration
157	 */
158	ecm_nss_ipv6_accel_done_time_update(feci);
159
160	/*
161	 * Dump some useful trace information.
162	 */
163	DEBUG_TRACE("%p: accelerate response for connection: %p, serial: %u\n", nnpci, feci->ci, serial);
164	DEBUG_TRACE("%p: rule_flags: %x, valid_flags: %x\n", nnpci, nircm->rule_flags, nircm->valid_flags);
165	DEBUG_TRACE("%p: flow_ip: " ECM_IP_ADDR_OCTAL_FMT ":%d\n", nnpci, ECM_IP_ADDR_TO_OCTAL(flow_ip), nircm->tuple.flow_ident);
166	DEBUG_TRACE("%p: return_ip: " ECM_IP_ADDR_OCTAL_FMT ":%d\n", nnpci, ECM_IP_ADDR_TO_OCTAL(return_ip), nircm->tuple.return_ident);
167	DEBUG_TRACE("%p: protocol: %d\n", nnpci, nircm->tuple.protocol);
168
169	/*
170	 * Handle the creation result code.
171	 */
172	DEBUG_TRACE("%p: response: %d\n", nnpci, nim->cm.response);
173	if (nim->cm.response != NSS_CMN_RESPONSE_ACK) {
174		/*
175		 * Creation command failed (specific reason ignored).
176		 */
177		DEBUG_TRACE("%p: accel nack: %d\n", nnpci, nim->cm.error);
178		spin_lock_bh(&feci->lock);
179		DEBUG_ASSERT(feci->accel_mode == ECM_FRONT_END_ACCELERATION_MODE_ACCEL_PENDING, "%p: Unexpected mode: %d\n", ci, feci->accel_mode);
180		feci->stats.ae_nack++;
181		feci->stats.ae_nack_total++;
182		if (feci->stats.ae_nack >= feci->stats.ae_nack_limit) {
183			/*
184			 * Too many NSS rejections
185			 */
186			result_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_ACCEL_ENGINE;
187		} else {
188			/*
189			 * Revert to decelerated
190			 */
191			result_mode = ECM_FRONT_END_ACCELERATION_MODE_DECEL;
192		}
193
194		/*
195	 * TODO: Why is this different from IPv4?
196	 * Clear any decelerate pending flag; since we aren't accelerated anyway we can clear this whether it is set or not.
197		 */
198		feci->stats.decelerate_pending = false;
199
200		/*
201		 * If connection is now defunct then set mode to ensure no further accel attempts occur
202		 */
203		if (feci->is_defunct) {
204			result_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DEFUNCT;
205		}
206
207		spin_lock_bh(&ecm_nss_ipv6_lock);
208		_ecm_nss_ipv6_accel_pending_clear(feci, result_mode);
209		spin_unlock_bh(&ecm_nss_ipv6_lock);
210
211		spin_unlock_bh(&feci->lock);
212
213		/*
214		 * Release the connection.
215		 */
216		feci->deref(feci);
217		ecm_db_connection_deref(ci);
218		return;
219	}
220
221	spin_lock_bh(&feci->lock);
222	DEBUG_ASSERT(feci->accel_mode == ECM_FRONT_END_ACCELERATION_MODE_ACCEL_PENDING, "%p: Unexpected mode: %d\n", ci, feci->accel_mode);
223
224	/*
225	 * If a flush occurred before we got the ACK then our acceleration was effectively cancelled on us.
226	 * GGG TODO This is a workaround for an NSS out-of-order message quirk; it should eventually be removed.
227	 */
228	if (feci->stats.flush_happened) {
229		feci->stats.flush_happened = false;
230
231		/*
232		 * Increment the no-action counter.  Our connection was decelerated on us with no action occurring.
233		 */
234		feci->stats.no_action_seen++;
235
236		spin_lock_bh(&ecm_nss_ipv6_lock);
237		_ecm_nss_ipv6_accel_pending_clear(feci, ECM_FRONT_END_ACCELERATION_MODE_DECEL);
238		spin_unlock_bh(&ecm_nss_ipv6_lock);
239
240		spin_unlock_bh(&feci->lock);
241
242		/*
243		 * Release the connection.
244		 */
245		feci->deref(feci);
246		ecm_db_connection_deref(ci);
247		return;
248	}
249
250	/*
251	 * Create succeeded
252	 */
253
254	/*
255	 * Clear any nack count
256	 */
257	feci->stats.ae_nack = 0;
258
259	/*
260	 * Clear the "accelerate pending" state and move to "accelerated" state bumping
261	 * the accelerated counters to match our new state.
262	 *
263	 * Decelerate may have been attempted while we were "pending accel" and
264	 * this function will return true if that was the case.
265	 * If decelerate was pending then we need to begin deceleration :-(
266	 */
267	spin_lock_bh(&ecm_nss_ipv6_lock);
268
269	ecm_nss_non_ported_ipv6_accelerated_count++;	/* Protocol specific counter */
270	ecm_nss_ipv6_accelerated_count++;				/* General running counter */
271
272	if (!_ecm_nss_ipv6_accel_pending_clear(feci, ECM_FRONT_END_ACCELERATION_MODE_ACCEL)) {
273		/*
274		 * Increment the no-action counter, this is reset if offload action is seen
275		 */
276		feci->stats.no_action_seen++;
277
278		spin_unlock_bh(&ecm_nss_ipv6_lock);
279		spin_unlock_bh(&feci->lock);
280
281		/*
282		 * Release the connection.
283		 */
284		feci->deref(feci);
285		ecm_db_connection_deref(ci);
286		return;
287	}
288
289	DEBUG_INFO("%p: Decelerate was pending\n", ci);
290
291	spin_unlock_bh(&ecm_nss_ipv6_lock);
292	spin_unlock_bh(&feci->lock);
293
294	feci->decelerate(feci);
295
296	/*
297	 * Release the connection.
298	 */
299	feci->deref(feci);
300	ecm_db_connection_deref(ci);
301}
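
/*
 * Summary of how the callback above resolves the create response (derived from
 * the code paths it takes, no new behaviour is implied):
 *
 *	NACK			- ae_nack is counted; at ae_nack_limit the mode becomes
 *				  FAIL_ACCEL_ENGINE, otherwise it reverts to DECEL
 *				  (or FAIL_DEFUNCT if the connection became defunct).
 *	ACK after a flush	- treated as if nothing was accelerated; no_action_seen
 *				  is bumped and the mode reverts to DECEL.
 *	ACK			- the accelerated counters are bumped and the mode becomes
 *				  ACCEL; if a decelerate was requested while the create was
 *				  pending, decelerate() is invoked immediately.
 */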
302
303/*
304 * ecm_nss_non_ported_ipv6_connection_accelerate()
305 *	Accelerate a connection
306 *
307 * GGG TODO Refactor this function into a single function that np, udp and tcp
308 * can all use and reduce the amount of code!
309 */
310static void ecm_nss_non_ported_ipv6_connection_accelerate(struct ecm_front_end_connection_instance *feci,
311									struct ecm_classifier_process_response *pr, bool is_l2_encap)
312{
313	struct ecm_nss_non_ported_ipv6_connection_instance *nnpci = (struct ecm_nss_non_ported_ipv6_connection_instance *)feci;
314	uint16_t regen_occurrances;
315	int protocol;
316	int32_t from_ifaces_first;
317	int32_t to_ifaces_first;
318	struct ecm_db_iface_instance *from_ifaces[ECM_DB_IFACE_HEIRARCHY_MAX];
319	struct ecm_db_iface_instance *to_ifaces[ECM_DB_IFACE_HEIRARCHY_MAX];
320	struct ecm_db_iface_instance *from_nss_iface;
321	struct ecm_db_iface_instance *to_nss_iface;
322	int32_t from_nss_iface_id;
323	int32_t to_nss_iface_id;
324	uint8_t from_nss_iface_address[ETH_ALEN];
325	uint8_t to_nss_iface_address[ETH_ALEN];
326	struct nss_ipv6_msg nim;
327	struct nss_ipv6_rule_create_msg *nircm;
328	struct ecm_classifier_instance *assignments[ECM_CLASSIFIER_TYPES];
329	int aci_index;
330	int assignment_count;
331	nss_tx_status_t nss_tx_status;
332	int32_t list_index;
333	int32_t interface_type_counts[ECM_DB_IFACE_TYPE_COUNT];
334	bool rule_invalid;
335	ip_addr_t src_ip;
336	ip_addr_t dest_ip;
337	ecm_front_end_acceleration_mode_t result_mode;
338
339	DEBUG_CHECK_MAGIC(nnpci, ECM_NSS_NON_PORTED_IPV6_CONNECTION_INSTANCE_MAGIC, "%p: magic failed", nnpci);
340
341	/*
342	 * Get the regeneration occurrence counter of the connection.
343	 * We compare it again at the end - to ensure that the rule construction has seen no generation
344	 * changes during rule creation.
345	 */
346	regen_occurrances = ecm_db_connection_regeneration_occurrances_get(feci->ci);
347
348	/*
349	 * For non-ported protocols we only support IPv6 in 4 or ESP
350	 */
351	protocol = ecm_db_connection_protocol_get(feci->ci);
352	if ((protocol != IPPROTO_IPIP) && (protocol != IPPROTO_ESP)) {
353		spin_lock_bh(&feci->lock);
354		feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_RULE;
355		spin_unlock_bh(&feci->lock);
356		DEBUG_TRACE("%p: unsupported protocol: %d\n", nnpci, protocol);
357		return;
358	}
359
360	/*
361	 * Test if acceleration is permitted
362	 */
363	if (!ecm_nss_ipv6_accel_pending_set(feci)) {
364		DEBUG_TRACE("%p: Acceleration not permitted: %p\n", feci, feci->ci);
365		return;
366	}
367
368	/*
369	 * Okay construct an accel command.
370	 * Initialise creation structure.
371	 * NOTE: We leverage the app_data void pointer to be our 32 bit connection serial number.
372	 * When we get it back we re-cast it to a uint32 and do a faster connection lookup.
373	 */
374	memset(&nim, 0, sizeof(struct nss_ipv6_msg));
375	nss_ipv6_msg_init(&nim, NSS_IPV6_RX_INTERFACE, NSS_IPV6_TX_CREATE_RULE_MSG,
376			sizeof(struct nss_ipv6_rule_create_msg),
377			ecm_nss_non_ported_ipv6_connection_callback,
378			(void *)ecm_db_connection_serial_get(feci->ci));
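
	/*
	 * Illustration of the app_data round trip described above: the serial travels
	 * as the opaque pointer and the callback recovers it with a cast before doing
	 * the connection lookup (see ecm_nss_non_ported_ipv6_connection_callback()):
	 *
	 *	uint32_t serial = (uint32_t)app_data;
	 *	ci = ecm_db_connection_serial_find_and_ref(serial);
	 */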
379
380	nircm = &nim.msg.rule_create;
381	nircm->valid_flags = 0;
382	nircm->rule_flags = 0;
383
384	/*
385	 * Initialize VLAN tag information
386	 */
387	nircm->vlan_primary_rule.ingress_vlan_tag = ECM_NSS_CONNMGR_VLAN_ID_NOT_CONFIGURED;
388	nircm->vlan_primary_rule.egress_vlan_tag = ECM_NSS_CONNMGR_VLAN_ID_NOT_CONFIGURED;
389	nircm->vlan_secondary_rule.ingress_vlan_tag = ECM_NSS_CONNMGR_VLAN_ID_NOT_CONFIGURED;
390	nircm->vlan_secondary_rule.egress_vlan_tag = ECM_NSS_CONNMGR_VLAN_ID_NOT_CONFIGURED;
391
392	/*
393	 * Get the interface lists of the connection, we must have at least one interface in the list to continue
394	 */
395	from_ifaces_first = ecm_db_connection_from_interfaces_get_and_ref(feci->ci, from_ifaces);
396	if (from_ifaces_first == ECM_DB_IFACE_HEIRARCHY_MAX) {
397		DEBUG_WARN("%p: Accel attempt failed - no interfaces in from_interfaces list!\n", nnpci);
398		goto non_ported_accel_bad_rule;
399	}
400
401	to_ifaces_first = ecm_db_connection_to_interfaces_get_and_ref(feci->ci, to_ifaces);
402	if (to_ifaces_first == ECM_DB_IFACE_HEIRARCHY_MAX) {
403		DEBUG_WARN("%p: Accel attempt failed - no interfaces in to_interfaces list!\n", nnpci);
404		ecm_db_connection_interfaces_deref(from_ifaces, from_ifaces_first);
405		goto non_ported_accel_bad_rule;
406	}
407
408	/*
409	 * First interface in each must be a known nss interface
410	 */
411	from_nss_iface = from_ifaces[from_ifaces_first];
412	to_nss_iface = to_ifaces[to_ifaces_first];
413	from_nss_iface_id = ecm_db_iface_ae_interface_identifier_get(from_nss_iface);
414	to_nss_iface_id = ecm_db_iface_ae_interface_identifier_get(to_nss_iface);
415	if ((from_nss_iface_id < 0) || (to_nss_iface_id < 0)) {
416		DEBUG_TRACE("%p: from_nss_iface_id: %d, to_nss_iface_id: %d\n", nnpci, from_nss_iface_id, to_nss_iface_id);
417		ecm_db_connection_interfaces_deref(from_ifaces, from_ifaces_first);
418		ecm_db_connection_interfaces_deref(to_ifaces, to_ifaces_first);
419		goto non_ported_accel_bad_rule;
420	}
421
422	/*
423	 * New rule being created
424	 */
425	nircm->valid_flags |= NSS_IPV6_RULE_CREATE_CONN_VALID;
426
427	/*
428	 * Set interface numbers involved in accelerating this connection.
429	 * These are the outer-facing interfaces from the hierarchy interface lists we got above.
430	 * These may be overridden later if we detect special interface types e.g. ipsec.
431	 */
432	nircm->conn_rule.flow_interface_num = from_nss_iface_id;
433	nircm->conn_rule.return_interface_num = to_nss_iface_id;
434
435	/*
436	 * We know that each outward facing interface is known to the NSS and so this connection could be accelerated.
437	 * However the lists may also specify other interesting details that must be included in the creation command,
438	 * for example, ethernet MAC, VLAN tagging or PPPoE session information.
439	 * We get this information by walking from the outermost to the innermost interface in each list and examining the interface types.
440	 *
441	 * Start with the 'from' (src) side.
442	 * NOTE: The lists may contain a complex hierarchy of similar interface types, e.g. multiple VLANs or tunnels within tunnels.
443	 * The NSS cannot handle that - there is no way to describe it in the rule - so if we see multiple conflicting types we have to abort.
444	 */
445	DEBUG_TRACE("%p: Examine from/src hierarchy list\n", nnpci);
446	memset(interface_type_counts, 0, sizeof(interface_type_counts));
447	rule_invalid = false;
448	for (list_index = from_ifaces_first; !rule_invalid && (list_index < ECM_DB_IFACE_HEIRARCHY_MAX); list_index++) {
449		struct ecm_db_iface_instance *ii;
450		ecm_db_iface_type_t ii_type;
451		char *ii_name;
452
453		ii = from_ifaces[list_index];
454		ii_type = ecm_db_connection_iface_type_get(ii);
455		ii_name = ecm_db_interface_type_to_string(ii_type);
456		DEBUG_TRACE("%p: list_index: %d, ii: %p, type: %d (%s)\n", nnpci, list_index, ii, ii_type, ii_name);
457
458		/*
459		 * Extract information from this interface type if it is applicable to the rule.
460		 * Conflicting information may cause accel to be unsupported.
461		 */
462		switch (ii_type) {
463#ifdef ECM_INTERFACE_PPP_ENABLE
464			struct ecm_db_interface_info_pppoe pppoe_info;
465#endif
466#ifdef ECM_INTERFACE_VLAN_ENABLE
467			struct ecm_db_interface_info_vlan vlan_info;
468			uint32_t vlan_value = 0;
469			struct net_device *vlan_in_dev = NULL;
470#endif
471
472		case ECM_DB_IFACE_TYPE_BRIDGE:
473			DEBUG_TRACE("%p: Bridge\n", nnpci);
474			if (interface_type_counts[ii_type] != 0) {
475				/*
476				 * Cannot cascade bridges
477				 */
478				rule_invalid = true;
479				DEBUG_TRACE("%p: Bridge - ignore additional\n", nnpci);
480				break;
481			}
482			ecm_db_iface_bridge_address_get(ii, from_nss_iface_address);
483			DEBUG_TRACE("%p: Bridge - mac: %pM\n", nnpci, from_nss_iface_address);
484			break;
485		case ECM_DB_IFACE_TYPE_ETHERNET:
486			DEBUG_TRACE("%p: Ethernet\n", nnpci);
487			if (interface_type_counts[ii_type] != 0) {
488				/*
489				 * Ignore additional MAC addresses; these are usually the result of address propagation
490				 * from bridges down to ports etc.
491				 */
492				DEBUG_TRACE("%p: Ethernet - ignore additional\n", nnpci);
493				break;
494			}
495
496			/*
497			 * Can only handle one MAC, the first outermost mac.
498			 */
499			ecm_db_iface_ethernet_address_get(ii, from_nss_iface_address);
500			DEBUG_TRACE("%p: Ethernet - mac: %pM\n", nnpci, from_nss_iface_address);
501			break;
502		case ECM_DB_IFACE_TYPE_PPPOE:
503#ifdef ECM_INTERFACE_PPP_ENABLE
504			/*
505			 * More than one PPPoE in the list is not valid!
506			 */
507			if (interface_type_counts[ii_type] != 0) {
508				DEBUG_TRACE("%p: PPPoE - additional unsupported\n", nnpci);
509				rule_invalid = true;
510				break;
511			}
512
513			/*
514			 * Copy pppoe session info to the creation structure.
515			 */
516			ecm_db_iface_pppoe_session_info_get(ii, &pppoe_info);
517
518			nircm->pppoe_rule.flow_pppoe_session_id = pppoe_info.pppoe_session_id;
519			memcpy(nircm->pppoe_rule.flow_pppoe_remote_mac, pppoe_info.remote_mac, ETH_ALEN);
520			nircm->valid_flags |= NSS_IPV6_RULE_CREATE_PPPOE_VALID;
521
522			DEBUG_TRACE("%p: PPPoE - session: %x, mac: %pM\n", nnpci,
523					nircm->pppoe_rule.flow_pppoe_session_id,
524					nircm->pppoe_rule.flow_pppoe_remote_mac);
525#else
526			rule_invalid = true;
527#endif
528			break;
529		case ECM_DB_IFACE_TYPE_VLAN:
530#ifdef ECM_INTERFACE_VLAN_ENABLE
531			DEBUG_TRACE("%p: VLAN\n", nnpci);
532			if (interface_type_counts[ii_type] > 1) {
533				/*
534				 * Can only support two vlans
535				 */
536				rule_invalid = true;
537				DEBUG_TRACE("%p: VLAN - additional unsupported\n", nnpci);
538				break;
539			}
540			ecm_db_iface_vlan_info_get(ii, &vlan_info);
541			vlan_value = ((vlan_info.vlan_tpid << 16) | vlan_info.vlan_tag);
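
			/*
			 * Worked example of the packing above (values illustrative only):
			 * with a TPID of 0x8100 and a tag (TCI) of 0x0064 (VID 100),
			 * vlan_value = (0x8100 << 16) | 0x0064 = 0x81000064.
			 * The egress priority mapping looked up just below is OR'd into
			 * this value as well.
			 */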
542
543			/*
544			 * Look up the vlan device and incorporate the vlan priority into the vlan_value
545			 */
546			vlan_in_dev = dev_get_by_index(&init_net, ecm_db_iface_interface_identifier_get(ii));
547			if (vlan_in_dev) {
548				vlan_value |= vlan_dev_get_egress_prio(vlan_in_dev, pr->return_qos_tag);
549				dev_put(vlan_in_dev);
550				vlan_in_dev = NULL;
551			}
552
553			/*
554			 * Primary or secondary (QinQ) VLAN?
555			 */
556			if (interface_type_counts[ii_type] == 0) {
557				nircm->vlan_primary_rule.ingress_vlan_tag = vlan_value;
558			} else {
559				nircm->vlan_secondary_rule.ingress_vlan_tag = vlan_value;
560			}
561			nircm->valid_flags |= NSS_IPV6_RULE_CREATE_VLAN_VALID;
562
563			/*
564			 * If we have not yet got an ethernet mac then take this one (very unlikely, as the mac should have been propagated to the slave (outer) device).
565			 */
566			if (interface_type_counts[ECM_DB_IFACE_TYPE_ETHERNET] == 0) {
567				memcpy(from_nss_iface_address, vlan_info.address, ETH_ALEN);
568				interface_type_counts[ECM_DB_IFACE_TYPE_ETHERNET]++;
569				DEBUG_TRACE("%p: VLAN use mac: %pM\n", nnpci, from_nss_iface_address);
570			}
571			DEBUG_TRACE("%p: vlan tag: %x\n", nnpci, vlan_value);
572#else
573			rule_invalid = true;
574			DEBUG_TRACE("%p: VLAN - unsupported\n", nnpci);
575#endif
576			break;
577		case ECM_DB_IFACE_TYPE_IPSEC_TUNNEL:
578#ifdef ECM_INTERFACE_IPSEC_ENABLE
579			DEBUG_TRACE("%p: IPSEC\n", nnpci);
580			if (interface_type_counts[ii_type] != 0) {
581				/*
582				 * Can only support one ipsec
583				 */
584				rule_invalid = true;
585				DEBUG_TRACE("%p: IPSEC - additional unsupported\n", nnpci);
586				break;
587			}
588			nircm->conn_rule.flow_interface_num = NSS_C2C_TX_INTERFACE;
589#else
590			rule_invalid = true;
591			DEBUG_TRACE("%p: IPSEC - unsupported\n", nnpci);
592#endif
593			break;
594		default:
595			DEBUG_TRACE("%p: Ignoring: %d (%s)\n", nnpci, ii_type, ii_name);
596		}
597
598		/*
599		 * Seen an interface of this type
600		 */
601		interface_type_counts[ii_type]++;
602	}
603	if (rule_invalid) {
604		DEBUG_WARN("%p: from/src Rule invalid\n", nnpci);
605		ecm_db_connection_interfaces_deref(from_ifaces, from_ifaces_first);
606		ecm_db_connection_interfaces_deref(to_ifaces, to_ifaces_first);
607		goto non_ported_accel_bad_rule;
608	}
609
610	/*
611	 * Now examine the TO / DEST hierarchy list to construct the destination part of the rule
612	 */
613	DEBUG_TRACE("%p: Examine to/dest hierarchy list\n", nnpci);
614	memset(interface_type_counts, 0, sizeof(interface_type_counts));
615	rule_invalid = false;
616	for (list_index = to_ifaces_first; !rule_invalid && (list_index < ECM_DB_IFACE_HEIRARCHY_MAX); list_index++) {
617		struct ecm_db_iface_instance *ii;
618		ecm_db_iface_type_t ii_type;
619		char *ii_name;
620
621		ii = to_ifaces[list_index];
622		ii_type = ecm_db_connection_iface_type_get(ii);
623		ii_name = ecm_db_interface_type_to_string(ii_type);
624		DEBUG_TRACE("%p: list_index: %d, ii: %p, type: %d (%s)\n", nnpci, list_index, ii, ii_type, ii_name);
625
626		/*
627		 * Extract information from this interface type if it is applicable to the rule.
628		 * Conflicting information may cause accel to be unsupported.
629		 */
630		switch (ii_type) {
631#ifdef ECM_INTERFACE_PPP_ENABLE
632			struct ecm_db_interface_info_pppoe pppoe_info;
633#endif
634#ifdef ECM_INTERFACE_VLAN_ENABLE
635			struct ecm_db_interface_info_vlan vlan_info;
636			uint32_t vlan_value = 0;
637			struct net_device *vlan_out_dev = NULL;
638#endif
639		case ECM_DB_IFACE_TYPE_BRIDGE:
640			DEBUG_TRACE("%p: Bridge\n", nnpci);
641			if (interface_type_counts[ii_type] != 0) {
642				/*
643				 * Cannot cascade bridges
644				 */
645				rule_invalid = true;
646				DEBUG_TRACE("%p: Bridge - ignore additional\n", nnpci);
647				break;
648			}
649			ecm_db_iface_bridge_address_get(ii, to_nss_iface_address);
650			DEBUG_TRACE("%p: Bridge - mac: %pM\n", nnpci, to_nss_iface_address);
651			break;
652		case ECM_DB_IFACE_TYPE_ETHERNET:
653			DEBUG_TRACE("%p: Ethernet\n", nnpci);
654			if (interface_type_counts[ii_type] != 0) {
655				/*
656				 * Ignore additional MAC addresses; these are usually the result of address propagation
657				 * from bridges down to ports etc.
658				 */
659				DEBUG_TRACE("%p: Ethernet - ignore additional\n", nnpci);
660				break;
661			}
662
663			/*
664			 * Can only handle one MAC, the first outermost mac.
665			 */
666			ecm_db_iface_ethernet_address_get(ii, to_nss_iface_address);
667			DEBUG_TRACE("%p: Ethernet - mac: %pM\n", nnpci, to_nss_iface_address);
668			break;
669		case ECM_DB_IFACE_TYPE_PPPOE:
670#ifdef ECM_INTERFACE_PPP_ENABLE
671			/*
672			 * More than one PPPoE in the list is not valid!
673			 */
674			if (interface_type_counts[ii_type] != 0) {
675				DEBUG_TRACE("%p: PPPoE - additional unsupported\n", nnpci);
676				rule_invalid = true;
677				break;
678			}
679
680			/*
681			 * Copy pppoe session info to the creation structure.
682			 */
683			ecm_db_iface_pppoe_session_info_get(ii, &pppoe_info);
684			nircm->pppoe_rule.return_pppoe_session_id = pppoe_info.pppoe_session_id;
685			memcpy(nircm->pppoe_rule.return_pppoe_remote_mac, pppoe_info.remote_mac, ETH_ALEN);
686			nircm->valid_flags |= NSS_IPV6_RULE_CREATE_PPPOE_VALID;
687
688			DEBUG_TRACE("%p: PPPoE - session: %x, mac: %pM\n", nnpci,
689				    nircm->pppoe_rule.return_pppoe_session_id,
690				    nircm->pppoe_rule.return_pppoe_remote_mac);
691#else
692			rule_invalid = true;
693#endif
694			break;
695		case ECM_DB_IFACE_TYPE_VLAN:
696#ifdef ECM_INTERFACE_VLAN_ENABLE
697			DEBUG_TRACE("%p: VLAN\n", nnpci);
698			if (interface_type_counts[ii_type] > 1) {
699				/*
700				 * Can only support two vlans
701				 */
702				rule_invalid = true;
703				DEBUG_TRACE("%p: VLAN - additional unsupported\n", nnpci);
704				break;
705			}
706			ecm_db_iface_vlan_info_get(ii, &vlan_info);
707			vlan_value = ((vlan_info.vlan_tpid << 16) | vlan_info.vlan_tag);
708
709			/*
710			 * Look up the vlan device and incorporate the vlan priority into the vlan_value
711			 */
712			vlan_out_dev = dev_get_by_index(&init_net, ecm_db_iface_interface_identifier_get(ii));
713			if (vlan_out_dev) {
714				vlan_value |= vlan_dev_get_egress_prio(vlan_out_dev, pr->flow_qos_tag);
715				dev_put(vlan_out_dev);
716				vlan_out_dev = NULL;
717			}
718
719			/*
720			 * Primary or secondary (QinQ) VLAN?
721			 */
722			if (interface_type_counts[ii_type] == 0) {
723				nircm->vlan_primary_rule.egress_vlan_tag = vlan_value;
724			} else {
725				nircm->vlan_secondary_rule.egress_vlan_tag = vlan_value;
726			}
727			nircm->valid_flags |= NSS_IPV6_RULE_CREATE_VLAN_VALID;
728
729			/*
730			 * If we have not yet got an ethernet mac then take this one (very unlikely, as the mac should have been propagated to the slave (outer) device).
731			 */
732			if (interface_type_counts[ECM_DB_IFACE_TYPE_ETHERNET] == 0) {
733				memcpy(to_nss_iface_address, vlan_info.address, ETH_ALEN);
734				interface_type_counts[ECM_DB_IFACE_TYPE_ETHERNET]++;
735				DEBUG_TRACE("%p: VLAN use mac: %pM\n", nnpci, to_nss_iface_address);
736			}
737			DEBUG_TRACE("%p: vlan tag: %x\n", nnpci, vlan_value);
738#else
739			rule_invalid = true;
740			DEBUG_TRACE("%p: VLAN - unsupported\n", nnpci);
741#endif
742			break;
743		case ECM_DB_IFACE_TYPE_IPSEC_TUNNEL:
744#ifdef ECM_INTERFACE_IPSEC_ENABLE
745			DEBUG_TRACE("%p: IPSEC\n", nnpci);
746			if (interface_type_counts[ii_type] != 0) {
747				/*
748				 * Can only support one ipsec
749				 */
750				rule_invalid = true;
751				DEBUG_TRACE("%p: IPSEC - additional unsupported\n", nnpci);
752				break;
753			}
754			nircm->conn_rule.return_interface_num = NSS_C2C_TX_INTERFACE;
755#else
756			rule_invalid = true;
757			DEBUG_TRACE("%p: IPSEC - unsupported\n", nnpci);
758#endif
759			break;
760		default:
761			DEBUG_TRACE("%p: Ignoring: %d (%s)\n", nnpci, ii_type, ii_name);
762		}
763
764		/*
765		 * Seen an interface of this type
766		 */
767		interface_type_counts[ii_type]++;
768	}
769	if (rule_invalid) {
770		DEBUG_WARN("%p: to/dest Rule invalid\n", nnpci);
771		ecm_db_connection_interfaces_deref(from_ifaces, from_ifaces_first);
772		ecm_db_connection_interfaces_deref(to_ifaces, to_ifaces_first);
773		goto non_ported_accel_bad_rule;
774	}
775
776	/*
777	 * Routed or bridged?
778	 */
779	if (ecm_db_connection_is_routed_get(feci->ci)) {
780		nircm->rule_flags |= NSS_IPV6_RULE_CREATE_FLAG_ROUTED;
781	} else {
782		nircm->rule_flags |= NSS_IPV6_RULE_CREATE_FLAG_BRIDGE_FLOW;
783		if (is_l2_encap) {
784			nircm->rule_flags |= NSS_IPV6_RULE_CREATE_FLAG_L2_ENCAP;
785		}
786	}
787
788	/*
789	 * Set up the flow and return qos tags
790	 */
791	nircm->qos_rule.flow_qos_tag = (uint32_t)pr->flow_qos_tag;
792	nircm->qos_rule.return_qos_tag = (uint32_t)pr->return_qos_tag;
793	nircm->valid_flags |= NSS_IPV6_RULE_CREATE_QOS_VALID;
794
795#ifdef ECM_CLASSIFIER_DSCP_ENABLE
796	/*
797	 * DSCP information?
798	 */
799	if (pr->process_actions & ECM_CLASSIFIER_PROCESS_ACTION_DSCP) {
800		nircm->dscp_rule.flow_dscp = pr->flow_dscp;
801		nircm->dscp_rule.return_dscp = pr->return_dscp;
802		nircm->rule_flags |= NSS_IPV6_RULE_CREATE_FLAG_DSCP_MARKING;
803		nircm->valid_flags |= NSS_IPV6_RULE_CREATE_DSCP_MARKING_VALID;
804	}
805#endif
806	/*
807	 * Set protocol
808	 */
809	nircm->tuple.protocol = (int32_t)protocol;
810
811	/*
812	 * The flow_ip is where the connection was established from
813	 */
814	ecm_db_connection_from_address_get(feci->ci, src_ip);
815	ECM_IP_ADDR_TO_NSS_IPV6_ADDR(nircm->tuple.flow_ip, src_ip);
816
817	/*
818	 * The return_ip is where the connection is established to
819	 */
820	ecm_db_connection_to_address_get(feci->ci, dest_ip);
821	ECM_IP_ADDR_TO_NSS_IPV6_ADDR(nircm->tuple.return_ip, dest_ip);
822
823	/*
824	 * Same approach as above for port information
825	 */
826	nircm->tuple.flow_ident = ecm_db_connection_from_port_get(feci->ci);
827	nircm->tuple.return_ident = ecm_db_connection_to_port_nat_get(feci->ci);
828
829	/*
830	 * Get mac addresses.
831	 * The src_mac is the mac address of the node that established the connection.
832	 * This will work whether the from_node is LAN (egress) or WAN (ingress).
833	 */
834	ecm_db_connection_from_node_address_get(feci->ci, (uint8_t *)nircm->conn_rule.flow_mac);
835
836	/*
837	 * The dest_mac is the mac address of the node that the connection is established to.
838	 */
839	ecm_db_connection_to_nat_node_address_get(feci->ci, (uint8_t *)nircm->conn_rule.return_mac);
840
841	/*
842	 * Get MTU information
843	 */
844	nircm->conn_rule.flow_mtu = (uint32_t)ecm_db_connection_from_iface_mtu_get(feci->ci);
845	nircm->conn_rule.return_mtu = (uint32_t)ecm_db_connection_to_iface_mtu_get(feci->ci);
846
847	/*
848	 * Sync our creation command from the assigned classifiers to get specific additional creation rules.
849	 * NOTE: These are called in ascending order of priority and so the last classifier (highest) shall
850	 * override any preceding classifiers.
851	 * This also gives the classifiers a chance to see that acceleration is being attempted.
852	 */
853	assignment_count = ecm_db_connection_classifier_assignments_get_and_ref(feci->ci, assignments);
854	for (aci_index = 0; aci_index < assignment_count; ++aci_index) {
855		struct ecm_classifier_instance *aci;
856		struct ecm_classifier_rule_create ecrc;
857		/*
858		 * NOTE: The current classifiers do not sync anything to the underlying accel engines.
859		 * In the future, if any of the classifiers wants to pass any parameter, these parameters
860		 * should be received via this object and copied to the accel engine's create object (nircm).
861		*/
862		aci = assignments[aci_index];
863		DEBUG_TRACE("%p: sync from: %p, type: %d\n", nnpci, aci, aci->type_get(aci));
864		aci->sync_from_v6(aci, &ecrc);
865	}
866	ecm_db_connection_assignments_release(assignment_count, assignments);
867
868	/*
869	 * Release the interface lists
870	 */
871	ecm_db_connection_interfaces_deref(from_ifaces, from_ifaces_first);
872	ecm_db_connection_interfaces_deref(to_ifaces, to_ifaces_first);
873
874	DEBUG_INFO("%p: NON_PORTED Accelerate connection %p\n"
875			"Protocol: %d\n"
876			"from_mtu: %u\n"
877			"to_mtu: %u\n"
878			"from_ip: " ECM_IP_ADDR_OCTAL_FMT ":%d\n"
879			"to_ip: " ECM_IP_ADDR_OCTAL_FMT ":%d\n"
880			"from_mac: %pM\n"
881			"to_mac: %pM\n"
882			"src_iface_num: %u\n"
883			"dest_iface_num: %u\n"
884			"ingress_inner_vlan_tag: %u\n"
885			"egress_inner_vlan_tag: %u\n"
886			"ingress_outer_vlan_tag: %u\n"
887			"egress_outer_vlan_tag: %u\n"
888			"rule_flags: %x\n"
889			"valid_flags: %x\n"
890			"return_pppoe_session_id: %u\n"
891			"return_pppoe_remote_mac: %pM\n"
892			"flow_pppoe_session_id: %u\n"
893			"flow_pppoe_remote_mac: %pM\n"
894			"flow_qos_tag: %x (%u)\n"
895			"return_qos_tag: %x (%u)\n"
896			"flow_dscp: %x\n"
897			"return_dscp: %x\n",
898			nnpci,
899			feci->ci,
900			nircm->tuple.protocol,
901			nircm->conn_rule.flow_mtu,
902			nircm->conn_rule.return_mtu,
903			ECM_IP_ADDR_TO_OCTAL(src_ip), nircm->tuple.flow_ident,
904			ECM_IP_ADDR_TO_OCTAL(dest_ip), nircm->tuple.return_ident,
905			nircm->conn_rule.flow_mac,
906			nircm->conn_rule.return_mac,
907			nircm->conn_rule.flow_interface_num,
908			nircm->conn_rule.return_interface_num,
909			nircm->vlan_primary_rule.ingress_vlan_tag,
910			nircm->vlan_primary_rule.egress_vlan_tag,
911			nircm->vlan_secondary_rule.ingress_vlan_tag,
912			nircm->vlan_secondary_rule.egress_vlan_tag,
913			nircm->rule_flags,
914			nircm->valid_flags,
915			nircm->pppoe_rule.return_pppoe_session_id,
916			nircm->pppoe_rule.return_pppoe_remote_mac,
917			nircm->pppoe_rule.flow_pppoe_session_id,
918			nircm->pppoe_rule.flow_pppoe_remote_mac,
919			nircm->qos_rule.flow_qos_tag, nircm->qos_rule.flow_qos_tag,
920			nircm->qos_rule.return_qos_tag, nircm->qos_rule.return_qos_tag,
921			nircm->dscp_rule.flow_dscp,
922			nircm->dscp_rule.return_dscp);
923
924	/*
925	 * Now that the rule has been constructed we re-compare the regeneration occurrence counter.
926	 * If there has been a change then we abort because the rule may have been created using
927	 * unstable data - especially if another thread has begun regeneration of the connection state.
928	 * NOTE: This does not prevent a regen from being flagged immediately after this line of code either,
929	 * or while the acceleration rule is in flight to the NSS.
930	 * This is only to check for consistency of rule state - not that the state is stale.
931	 * Remember that the connection is marked as "accel pending state" so if a regen is flagged immediately
932	 * after this check passes, the connection will be decelerated and refreshed very quickly.
933	 */
934	if (regen_occurrances != ecm_db_connection_regeneration_occurrances_get(feci->ci)) {
935		DEBUG_INFO("%p: connection:%p regen occurred - aborting accel rule.\n", feci, feci->ci);
936		ecm_nss_ipv6_accel_pending_clear(feci, ECM_FRONT_END_ACCELERATION_MODE_DECEL);
937		return;
938	}
939
940	/*
941	 * Ref the connection before issuing an NSS rule
942	 * This ensures that when the NSS responds to the command - which may even be immediately -
943	 * the callback function can trust the correct ref was taken for its purpose.
944	 * NOTE: remember that this will also implicitly hold the feci.
945	 */
946	ecm_db_connection_ref(feci->ci);
947
948	/*
949	 * We are about to issue the command, record the time of transmission
950	 */
951	spin_lock_bh(&feci->lock);
952	feci->stats.cmd_time_begun = jiffies;
953	spin_unlock_bh(&feci->lock);
954
955	/*
956	 * Call the rule create function
957	 */
958	nss_tx_status = nss_ipv6_tx(ecm_nss_ipv6_nss_ipv6_mgr, &nim);
959	if (nss_tx_status == NSS_TX_SUCCESS) {
960		/*
961		 * Reset the driver_fail count - transmission was okay here.
962		 */
963		spin_lock_bh(&feci->lock);
964		feci->stats.driver_fail = 0;
965		spin_unlock_bh(&feci->lock);
966		return;
967	}
968
969	/*
970	 * Release that ref!
971	 */
972	ecm_db_connection_deref(feci->ci);
973
974	/*
975	 * TX failed
976	 */
977	spin_lock_bh(&feci->lock);
978	DEBUG_ASSERT(feci->accel_mode == ECM_FRONT_END_ACCELERATION_MODE_ACCEL_PENDING, "%p: Accel mode unexpected: %d\n", nnpci, feci->accel_mode);
979	feci->stats.driver_fail_total++;
980	feci->stats.driver_fail++;
981	if (feci->stats.driver_fail >= feci->stats.driver_fail_limit) {
982		DEBUG_WARN("%p: Accel failed - driver fail limit\n", nnpci);
983		result_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DRIVER;
984	} else {
985		result_mode = ECM_FRONT_END_ACCELERATION_MODE_DECEL;
986	}
987
988	spin_lock_bh(&ecm_nss_ipv6_lock);
989	_ecm_nss_ipv6_accel_pending_clear(feci, result_mode);
990	spin_unlock_bh(&ecm_nss_ipv6_lock);
991
992	spin_unlock_bh(&feci->lock);
993	return;
994
995non_ported_accel_bad_rule:
996	;
997
998	/*
999	 * Jump to here when rule data is bad and an offload command cannot be constructed
1000	 */
1001	DEBUG_WARN("%p: Accel failed - bad rule\n", nnpci);
1002	ecm_nss_ipv6_accel_pending_clear(feci, ECM_FRONT_END_ACCELERATION_MODE_FAIL_RULE);
1003}
1004
1005/*
1006 * ecm_nss_non_ported_ipv6_connection_destroy_callback()
1007 *	Callback for handling destroy ack/nack calls.
1008 */
1009static void ecm_nss_non_ported_ipv6_connection_destroy_callback(void *app_data, struct nss_ipv6_msg *nim)
1010{
1011	struct nss_ipv6_rule_destroy_msg *nirdm = &nim->msg.rule_destroy;
1012	uint32_t serial = (uint32_t)app_data;
1013	struct ecm_db_connection_instance *ci;
1014	struct ecm_front_end_connection_instance *feci;
1015	struct ecm_nss_non_ported_ipv6_connection_instance *nnpci;
1016	ip_addr_t flow_ip;
1017	ip_addr_t return_ip;
1018
1019	/*
1020	 * Is this a response to a destroy message?
1021	 */
1022	if (nim->cm.type != NSS_IPV6_TX_DESTROY_RULE_MSG) {
1023		DEBUG_ERROR("%p: non_ported destroy callback with improper type: %d\n", nim, nim->cm.type);
1024		return;
1025	}
1026
1027	/*
1028	 * Look up ecm connection so that we can update the status.
1029	 */
1030	ci = ecm_db_connection_serial_find_and_ref(serial);
1031	if (!ci) {
1032		DEBUG_TRACE("%p: destroy callback, connection not found, serial: %u\n", nim, serial);
1033		return;
1034	}
1035
1036	/*
1037	 * Release the ref that was held for this ack/nack response.
1038	 * NOTE: It's okay to do this here; ci won't go away because we still hold the
1039	 * ref taken by ecm_db_connection_serial_find_and_ref() above.
1040	 */
1041	ecm_db_connection_deref(ci);
1042
1043	/*
1044	 * Get the front end instance
1045	 */
1046	feci = ecm_db_connection_front_end_get_and_ref(ci);
1047	nnpci = (struct ecm_nss_non_ported_ipv6_connection_instance *)feci;
1048	DEBUG_CHECK_MAGIC(nnpci, ECM_NSS_NON_PORTED_IPV6_CONNECTION_INSTANCE_MAGIC, "%p: magic failed", nnpci);
1049
1050	ECM_NSS_IPV6_ADDR_TO_IP_ADDR(flow_ip, nirdm->tuple.flow_ip);
1051	ECM_NSS_IPV6_ADDR_TO_IP_ADDR(return_ip, nirdm->tuple.return_ip);
1052
1053	/*
1054	 * Record command duration
1055	 */
1056	ecm_nss_ipv6_decel_done_time_update(feci);
1057
1058	/*
1059	 * Dump some useful trace information.
1060	 */
1061	DEBUG_TRACE("%p: decelerate response for connection: %p\n", nnpci, feci->ci);
1062	DEBUG_TRACE("%p: flow_ip: " ECM_IP_ADDR_OCTAL_FMT ":%d\n", nnpci, ECM_IP_ADDR_TO_OCTAL(flow_ip), nirdm->tuple.flow_ident);
1063	DEBUG_TRACE("%p: return_ip: " ECM_IP_ADDR_OCTAL_FMT ":%d\n", nnpci, ECM_IP_ADDR_TO_OCTAL(return_ip), nirdm->tuple.return_ident);
1064	DEBUG_TRACE("%p: protocol: %d\n", nnpci, nirdm->tuple.protocol);
1065
1066	/*
1067	 * Drop decel pending counter
1068	 */
1069	spin_lock_bh(&ecm_nss_ipv6_lock);
1070	ecm_nss_ipv6_pending_decel_count--;
1071	DEBUG_ASSERT(ecm_nss_ipv6_pending_decel_count >= 0, "Bad decel pending counter\n");
1072	spin_unlock_bh(&ecm_nss_ipv6_lock);
1073
1074	spin_lock_bh(&feci->lock);
1075
1076	/*
1077	 * If decel is no longer pending then it's possible that the NSS ended acceleration for some other reason, e.g. a flush,
1078	 * in which case we cannot rely on the response we get here.
1079	 */
1080	if (feci->accel_mode != ECM_FRONT_END_ACCELERATION_MODE_DECEL_PENDING) {
1081		spin_unlock_bh(&feci->lock);
1082
1083		/*
1084		 * Release the connections.
1085		 */
1086		feci->deref(feci);
1087		ecm_db_connection_deref(ci);
1088		return;
1089	}
1090
1091	DEBUG_TRACE("%p: response: %d\n", nnpci, nim->cm.response);
1092	if (nim->cm.response != NSS_CMN_RESPONSE_ACK) {
1093		feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DECEL;
1094	} else {
1095		feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_DECEL;
1096	}
1097
1098	/*
1099	 * If connection became defunct then set mode so that no further accel/decel attempts occur.
1100	 */
1101	if (feci->is_defunct) {
1102		feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DEFUNCT;
1103	}
1104	spin_unlock_bh(&feci->lock);
1105
1106	/*
1107	 * NON_PORTED acceleration ends
1108	 */
1109	spin_lock_bh(&ecm_nss_ipv6_lock);
1110	ecm_nss_non_ported_ipv6_accelerated_count--;	/* Protocol specific counter */
1111	DEBUG_ASSERT(ecm_nss_non_ported_ipv6_accelerated_count >= 0, "Bad non_ported accel counter\n");
1112	ecm_nss_ipv6_accelerated_count--;		/* General running counter */
1113	DEBUG_ASSERT(ecm_nss_ipv6_accelerated_count >= 0, "Bad accel counter\n");
1114	spin_unlock_bh(&ecm_nss_ipv6_lock);
1115
1116	/*
1117	 * Release the connections.
1118	 */
1119	feci->deref(feci);
1120	ecm_db_connection_deref(ci);
1121}
1122
1123/*
1124 * ecm_nss_non_ported_ipv6_connection_decelerate()
1125 *	Decelerate a connection
1126 */
1127static void ecm_nss_non_ported_ipv6_connection_decelerate(struct ecm_front_end_connection_instance *feci)
1128{
1129	struct ecm_nss_non_ported_ipv6_connection_instance *nnpci = (struct ecm_nss_non_ported_ipv6_connection_instance *)feci;
1130	struct nss_ipv6_msg nim;
1131	struct nss_ipv6_rule_destroy_msg *nirdm;
1132	ip_addr_t src_ip;
1133	ip_addr_t dest_ip;
1134	nss_tx_status_t nss_tx_status;
1135	int protocol;
1136
1137	DEBUG_CHECK_MAGIC(nnpci, ECM_NSS_NON_PORTED_IPV6_CONNECTION_INSTANCE_MAGIC, "%p: magic failed", nnpci);
1138
1139	/*
1140	 * For non-ported protocols we only support IPIP.
1141	 */
1142	protocol = ecm_db_connection_protocol_get(feci->ci);
1143	if (protocol != IPPROTO_IPIP) {
1144		DEBUG_TRACE("%p: unsupported protocol: %d\n", nnpci, protocol);
1145		return;
1146	}
1147
1148	/*
1149	 * If decelerate is in error or already pending then ignore
1150	 */
1151	spin_lock_bh(&feci->lock);
1152	if (feci->stats.decelerate_pending) {
1153		spin_unlock_bh(&feci->lock);
1154		return;
1155	}
1156
1157	/*
1158	 * If acceleration is pending then we cannot decelerate right now or we will race with it
1159	 * Set a decelerate pending flag that will be actioned when the acceleration command is complete.
1160	 */
1161	if (feci->accel_mode == ECM_FRONT_END_ACCELERATION_MODE_ACCEL_PENDING) {
1162		feci->stats.decelerate_pending = true;
1163		spin_unlock_bh(&feci->lock);
1164		return;
1165	}
1166
1167	/*
1168	 * Can only decelerate if accelerated
1169	 * NOTE: This will also deny decel when the connection is in a fail condition.
1170	 */
1171	if (feci->accel_mode != ECM_FRONT_END_ACCELERATION_MODE_ACCEL) {
1172		spin_unlock_bh(&feci->lock);
1173		return;
1174	}
1175
1176	/*
1177	 * Initiate deceleration
1178	 */
1179	feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_DECEL_PENDING;
1180	spin_unlock_bh(&feci->lock);
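
	/*
	 * Summary of the accel_mode handling above (all taken from the checks just
	 * performed):
	 *
	 *	decelerate_pending set	- return; a decelerate is already queued.
	 *	ACCEL_PENDING		- flag decelerate_pending and return; it is actioned
	 *				  (or simply dropped on NACK) when the create response
	 *				  arrives.
	 *	anything but ACCEL	- return; there is nothing to decelerate.
	 *	ACCEL			- move to DECEL_PENDING and issue the destroy rule below.
	 */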
1181
1182	/*
1183	 * Increment the decel pending counter
1184	 */
1185	spin_lock_bh(&ecm_nss_ipv6_lock);
1186	ecm_nss_ipv6_pending_decel_count++;
1187	spin_unlock_bh(&ecm_nss_ipv6_lock);
1188
1189	/*
1190	 * Prepare deceleration message
1191	 */
1192	nss_ipv6_msg_init(&nim, NSS_IPV6_RX_INTERFACE, NSS_IPV6_TX_DESTROY_RULE_MSG,
1193			sizeof(struct nss_ipv6_rule_destroy_msg),
1194			ecm_nss_non_ported_ipv6_connection_destroy_callback,
1195			(void *)ecm_db_connection_serial_get(feci->ci));
1196
1197	nirdm = &nim.msg.rule_destroy;
1198	nirdm->tuple.protocol = (int32_t)protocol;
1199
1200	/*
1201	 * Get addressing information
1202	 */
1203	ecm_db_connection_from_address_get(feci->ci, src_ip);
1204	ECM_IP_ADDR_TO_NSS_IPV6_ADDR(nirdm->tuple.flow_ip, src_ip);
1205	ecm_db_connection_to_address_nat_get(feci->ci, dest_ip);
1206	ECM_IP_ADDR_TO_NSS_IPV6_ADDR(nirdm->tuple.return_ip, dest_ip);
1207	nirdm->tuple.flow_ident = ecm_db_connection_from_port_get(feci->ci);
1208	nirdm->tuple.return_ident = ecm_db_connection_to_port_nat_get(feci->ci);
1209
1210	DEBUG_INFO("%p: NON_PORTED Connection %p decelerate\n"
1211			"src_ip: " ECM_IP_ADDR_OCTAL_FMT ":%d\n"
1212			"dest_ip: " ECM_IP_ADDR_OCTAL_FMT ":%d\n",
1213			nnpci, feci->ci,
1214			ECM_IP_ADDR_TO_OCTAL(src_ip), nirdm->tuple.flow_ident,
1215			ECM_IP_ADDR_TO_OCTAL(dest_ip), nirdm->tuple.return_ident);
1216
1217	/*
1218	 * Take a ref to the feci->ci so that it will persist until we get a response from the NSS.
1219	 * NOTE: This will implicitly hold the feci too.
1220	 */
1221	ecm_db_connection_ref(feci->ci);
1222
1223	/*
1224	 * We are about to issue the command, record the time of transmission
1225	 */
1226	spin_lock_bh(&feci->lock);
1227	feci->stats.cmd_time_begun = jiffies;
1228	spin_unlock_bh(&feci->lock);
1229
1230	/*
1231	 * Destroy the NSS connection cache entry.
1232	 */
1233	nss_tx_status = nss_ipv6_tx(ecm_nss_ipv6_nss_ipv6_mgr, &nim);
1234	if (nss_tx_status == NSS_TX_SUCCESS) {
1235		/*
1236		 * Reset the driver_fail count - transmission was okay here.
1237		 */
1238		spin_lock_bh(&feci->lock);
1239		feci->stats.driver_fail = 0;
1240		spin_unlock_bh(&feci->lock);
1241		return;
1242	}
1243
1244	/*
1245	 * Release the ref taken above; the NSS driver did not accept our command.
1246	 */
1247	ecm_db_connection_deref(feci->ci);
1248
1249	/*
1250	 * TX failed
1251	 */
1252	spin_lock_bh(&feci->lock);
1253	feci->stats.driver_fail_total++;
1254	feci->stats.driver_fail++;
1255	if (feci->stats.driver_fail >= feci->stats.driver_fail_limit) {
1256		DEBUG_WARN("%p: Decel failed - driver fail limit\n", nnpci);
1257		feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DRIVER;
1258	}
1259	spin_unlock_bh(&feci->lock);
1260
1261	/*
1262	 * Could not send the request, decrement the decel pending counter
1263	 */
1264	spin_lock_bh(&ecm_nss_ipv6_lock);
1265	ecm_nss_ipv6_pending_decel_count--;
1266	DEBUG_ASSERT(ecm_nss_ipv6_pending_decel_count >= 0, "Bad decel pending counter\n");
1267	spin_unlock_bh(&ecm_nss_ipv6_lock);
1268
1269}
1270
1271/*
1272 * ecm_nss_non_ported_ipv6_connection_defunct_callback()
1273 *	Callback to be called when a non-ported connection has become defunct.
1274 */
1275static void ecm_nss_non_ported_ipv6_connection_defunct_callback(void *arg)
1276{
1277	struct ecm_front_end_connection_instance *feci = (struct ecm_front_end_connection_instance *)arg;
1278	struct ecm_nss_non_ported_ipv6_connection_instance *nnpci = (struct ecm_nss_non_ported_ipv6_connection_instance *)feci;
1279
1280	DEBUG_CHECK_MAGIC(nnpci, ECM_NSS_NON_PORTED_IPV6_CONNECTION_INSTANCE_MAGIC, "%p: magic failed", nnpci);
1281
1282	spin_lock_bh(&feci->lock);
1283
1284	/*
1285	 * If connection has already become defunct, do nothing.
1286	 */
1287	if (feci->is_defunct) {
1288		spin_unlock_bh(&feci->lock);
1289		return;
1290	}
1291	feci->is_defunct = true;
1292
1293	/*
1294	 * If the connection is already in one of the fail modes, do nothing, keep the current accel_mode.
1295	 */
1296	if (ECM_FRONT_END_ACCELERATION_FAILED(feci->accel_mode)) {
1297		spin_unlock_bh(&feci->lock);
1298		return;
1299	}
1300
1301	/*
1302	 * If the connection is decel then ensure it will not attempt accel while defunct.
1303	 */
1304	if (feci->accel_mode == ECM_FRONT_END_ACCELERATION_MODE_DECEL) {
1305		feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DEFUNCT;
1306		spin_unlock_bh(&feci->lock);
1307		return;
1308	}
1309
1310	/*
1311	 * If the connection is decel pending then decel operation is in progress anyway.
1312	 */
1313	if (feci->accel_mode == ECM_FRONT_END_ACCELERATION_MODE_DECEL_PENDING) {
1314		spin_unlock_bh(&feci->lock);
1315		return;
1316	}
1317
1318	/*
1319	 * If none of the cases matched above, this means the connection is in one of the
1320	 * accel modes (accel or accel_pending) so we force a deceleration.
1321	 * NOTE: If the mode is accel pending then the decel will be actioned when that is completed.
1322	 */
1323	spin_unlock_bh(&feci->lock);
1324	ecm_nss_non_ported_ipv6_connection_decelerate(feci);
1325}
1326
1327/*
1328 * ecm_nss_non_ported_ipv6_connection_accel_state_get()
1329 *	Get acceleration state
1330 */
1331static ecm_front_end_acceleration_mode_t ecm_nss_non_ported_ipv6_connection_accel_state_get(struct ecm_front_end_connection_instance *feci)
1332{
1333	struct ecm_nss_non_ported_ipv6_connection_instance *nnpci = (struct ecm_nss_non_ported_ipv6_connection_instance *)feci;
1334	ecm_front_end_acceleration_mode_t state;
1335
1336	DEBUG_CHECK_MAGIC(nnpci, ECM_NSS_NON_PORTED_IPV6_CONNECTION_INSTANCE_MAGIC, "%p: magic failed", nnpci);
1337	spin_lock_bh(&feci->lock);
1338	state = feci->accel_mode;
1339	spin_unlock_bh(&feci->lock);
1340	return state;
1341}
1342
1343/*
1344 * ecm_nss_non_ported_ipv6_connection_action_seen()
1345 *	Acceleration action / activity has been seen for this connection.
1346 *
1347 * NOTE: Call the action_seen() method when the NSS has demonstrated that it has offloaded some data for a connection.
1348 */
1349static void ecm_nss_non_ported_ipv6_connection_action_seen(struct ecm_front_end_connection_instance *feci)
1350{
1351	struct ecm_nss_non_ported_ipv6_connection_instance *nnpci = (struct ecm_nss_non_ported_ipv6_connection_instance *)feci;
1352
1353	DEBUG_CHECK_MAGIC(nnpci, ECM_NSS_NON_PORTED_IPV6_CONNECTION_INSTANCE_MAGIC, "%p: magic failed", nnpci);
1354
1355	DEBUG_INFO("%p: Action seen\n", nnpci);
1356	spin_lock_bh(&feci->lock);
1357	feci->stats.no_action_seen = 0;
1358	spin_unlock_bh(&feci->lock);
1359}
1360
1361/*
1362 * ecm_nss_non_ported_ipv6_connection_accel_ceased()
1363 *	NSS has indicated that acceleration has stopped.
1364 *
1365 * NOTE: This is called in response to an NSS self-initiated termination of acceleration.
1366 * It must NOT be called when the ECM itself has terminated the acceleration.
1367 */
1368static void ecm_nss_non_ported_ipv6_connection_accel_ceased(struct ecm_front_end_connection_instance *feci)
1369{
1370	struct ecm_nss_non_ported_ipv6_connection_instance *nnpci = (struct ecm_nss_non_ported_ipv6_connection_instance *)feci;
1371
1372	DEBUG_CHECK_MAGIC(nnpci, ECM_NSS_NON_PORTED_IPV6_CONNECTION_INSTANCE_MAGIC, "%p: magic failed", nnpci);
1373
1374	DEBUG_INFO("%p: accel ceased\n", nnpci);
1375
1376	spin_lock_bh(&feci->lock);
1377
1378	/*
1379	 * If we are in accel-pending state then the NSS has issued a flush out-of-order
1380	 * with the ACK/NACK we are actually waiting for.
1381	 * To work around this we record a "flush has already happened" and will action it when we finally get that ACK/NACK.
1382	 * GGG TODO This should eventually be removed when the NSS honours messaging sequence.
1383	 */
1384	if (feci->accel_mode == ECM_FRONT_END_ACCELERATION_MODE_ACCEL_PENDING) {
1385		feci->stats.flush_happened = true;
1386		feci->stats.flush_happened_total++;
1387		spin_unlock_bh(&feci->lock);
1388		return;
1389	}
1390
1391	/*
1392	 * If connection is no longer accelerated by the time we get here just ignore the command
1393	 */
1394	if (feci->accel_mode != ECM_FRONT_END_ACCELERATION_MODE_ACCEL) {
1395		spin_unlock_bh(&feci->lock);
1396		return;
1397	}
1398
1399	/*
1400	 * If the no_action_seen counter was not reset then acceleration ended without any offload action
1401	 */
1402	if (feci->stats.no_action_seen) {
1403		feci->stats.no_action_seen_total++;
1404	}
1405
1406	/*
1407	 * If the no_action_seen counter indicates successive cessations of acceleration without any offload action occurring
1408	 * then we fail out this connection
1409	 */
1410	if (feci->stats.no_action_seen >= feci->stats.no_action_seen_limit) {
1411		feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_NO_ACTION;
1412	} else {
1413		feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_DECEL;
1414	}
1415	spin_unlock_bh(&feci->lock);
1416
1417	/*
1418	 * Non-Ported acceleration ends
1419	 */
1420	spin_lock_bh(&ecm_nss_ipv6_lock);
1421	ecm_nss_non_ported_ipv6_accelerated_count--;	/* Protocol specific counter */
1422	DEBUG_ASSERT(ecm_nss_non_ported_ipv6_accelerated_count >= 0, "Bad non-ported accel counter\n");
1423	ecm_nss_ipv6_accelerated_count--;		/* General running counter */
1424	DEBUG_ASSERT(ecm_nss_ipv6_accelerated_count >= 0, "Bad accel counter\n");
1425	spin_unlock_bh(&ecm_nss_ipv6_lock);
1426}
1427
1428/*
1429 * ecm_nss_non_ported_ipv6_connection_ref()
1430 *	Ref a connection front end instance
1431 */
1432static void ecm_nss_non_ported_ipv6_connection_ref(struct ecm_front_end_connection_instance *feci)
1433{
1434	struct ecm_nss_non_ported_ipv6_connection_instance *nnpci = (struct ecm_nss_non_ported_ipv6_connection_instance *)feci;
1435
1436	DEBUG_CHECK_MAGIC(nnpci, ECM_NSS_NON_PORTED_IPV6_CONNECTION_INSTANCE_MAGIC, "%p: magic failed", nnpci);
1437	spin_lock_bh(&feci->lock);
1438	feci->refs++;
1439	DEBUG_TRACE("%p: nnpci ref %d\n", nnpci, feci->refs);
1440	DEBUG_ASSERT(feci->refs > 0, "%p: ref wrap\n", nnpci);
1441	spin_unlock_bh(&feci->lock);
1442}
1443
1444/*
1445 * ecm_nss_non_ported_ipv6_connection_deref()
1446 *	Deref a connection front end instance
1447 */
1448static int ecm_nss_non_ported_ipv6_connection_deref(struct ecm_front_end_connection_instance *feci)
1449{
1450	struct ecm_nss_non_ported_ipv6_connection_instance *nnpci = (struct ecm_nss_non_ported_ipv6_connection_instance *)feci;
1451
1452	DEBUG_CHECK_MAGIC(nnpci, ECM_NSS_NON_PORTED_IPV6_CONNECTION_INSTANCE_MAGIC, "%p: magic failed", nnpci);
1453
1454	spin_lock_bh(&feci->lock);
1455	feci->refs--;
1456	DEBUG_ASSERT(feci->refs >= 0, "%p: ref wrap\n", nnpci);
1457
1458	if (feci->refs > 0) {
1459		int refs = feci->refs;
1460		spin_unlock_bh(&feci->lock);
1461		DEBUG_TRACE("%p: nnpci deref %d\n", nnpci, refs);
1462		return refs;
1463	}
1464	spin_unlock_bh(&feci->lock);
1465
1466	/*
1467	 * We can now destroy the instance
1468	 */
1469	DEBUG_TRACE("%p: nnpci final\n", nnpci);
1470	DEBUG_CLEAR_MAGIC(nnpci);
1471	kfree(nnpci);
1472
1473	return 0;
1474}
1475
1476#ifdef ECM_STATE_OUTPUT_ENABLE
1477/*
1478 * ecm_nss_non_ported_ipv6_connection_state_get()
1479 *	Return the state of this Non ported front end instance
1480 */
1481static int ecm_nss_non_ported_ipv6_connection_state_get(struct ecm_front_end_connection_instance *feci, struct ecm_state_file_instance *sfi)
1482{
1483	int result;
1484	bool can_accel;
1485	ecm_front_end_acceleration_mode_t accel_mode;
1486	struct ecm_front_end_connection_mode_stats stats;
1487	struct ecm_nss_non_ported_ipv6_connection_instance *nnpci = (struct ecm_nss_non_ported_ipv6_connection_instance *)feci;
1488
1489	DEBUG_CHECK_MAGIC(nnpci, ECM_NSS_NON_PORTED_IPV6_CONNECTION_INSTANCE_MAGIC, "%p: magic failed", nnpci);
1490
1491	spin_lock_bh(&feci->lock);
1492	can_accel = feci->can_accel;
1493	accel_mode = feci->accel_mode;
1494	memcpy(&stats, &feci->stats, sizeof(struct ecm_front_end_connection_mode_stats));
1495	spin_unlock_bh(&feci->lock);
1496
1497	if ((result = ecm_state_prefix_add(sfi, "front_end_v6.non_ported"))) {
1498		return result;
1499	}
1500
1501	if ((result = ecm_state_write(sfi, "can_accel", "%d", can_accel))) {
1502		return result;
1503	}
1504	if ((result = ecm_state_write(sfi, "accel_mode", "%d", accel_mode))) {
1505		return result;
1506	}
1507	if ((result = ecm_state_write(sfi, "decelerate_pending", "%d", stats.decelerate_pending))) {
1508		return result;
1509	}
1510	if ((result = ecm_state_write(sfi, "flush_happened_total", "%d", stats.flush_happened_total))) {
1511		return result;
1512	}
1513	if ((result = ecm_state_write(sfi, "no_action_seen_total", "%d", stats.no_action_seen_total))) {
1514		return result;
1515	}
1516	if ((result = ecm_state_write(sfi, "no_action_seen", "%d", stats.no_action_seen))) {
1517		return result;
1518	}
1519	if ((result = ecm_state_write(sfi, "no_action_seen_limit", "%d", stats.no_action_seen_limit))) {
1520		return result;
1521	}
1522	if ((result = ecm_state_write(sfi, "driver_fail_total", "%d", stats.driver_fail_total))) {
1523		return result;
1524	}
1525	if ((result = ecm_state_write(sfi, "driver_fail", "%d", stats.driver_fail))) {
1526		return result;
1527	}
1528	if ((result = ecm_state_write(sfi, "driver_fail_limit", "%d", stats.driver_fail_limit))) {
1529		return result;
1530	}
1531	if ((result = ecm_state_write(sfi, "ae_nack_total", "%d", stats.ae_nack_total))) {
1532		return result;
1533	}
1534	if ((result = ecm_state_write(sfi, "ae_nack", "%d", stats.ae_nack))) {
1535		return result;
1536	}
1537	if ((result = ecm_state_write(sfi, "ae_nack_limit", "%d", stats.ae_nack_limit))) {
1538		return result;
1539	}
1540
1541	return ecm_state_prefix_remove(sfi);
1542}
1543#endif
1544
1545/*
1546 * ecm_nss_non_ported_ipv6_connection_instance_alloc()
1547 *	Create a front end instance specific for non-ported connection
1548 */
static struct ecm_nss_non_ported_ipv6_connection_instance *ecm_nss_non_ported_ipv6_connection_instance_alloc(
								struct ecm_db_connection_instance *ci,
								bool can_accel)
{
	struct ecm_nss_non_ported_ipv6_connection_instance *nnpci;
	struct ecm_front_end_connection_instance *feci;

	nnpci = (struct ecm_nss_non_ported_ipv6_connection_instance *)kzalloc(sizeof(struct ecm_nss_non_ported_ipv6_connection_instance), GFP_ATOMIC | __GFP_NOWARN);
	if (!nnpci) {
		DEBUG_WARN("Non-Ported Front end alloc failed\n");
		return NULL;
	}

	/*
	 * Refs is 1 for the creator of the connection
	 */
	feci = (struct ecm_front_end_connection_instance *)nnpci;
	feci->refs = 1;
	DEBUG_SET_MAGIC(nnpci, ECM_NSS_NON_PORTED_IPV6_CONNECTION_INSTANCE_MAGIC);
	spin_lock_init(&feci->lock);

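	/*
	 * Record whether this connection is eligible for acceleration and set the
	 * starting mode accordingly: decelerated (may be offloaded later) or permanently denied.
	 */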
	feci->can_accel = can_accel;
	feci->accel_mode = (can_accel) ? ECM_FRONT_END_ACCELERATION_MODE_DECEL : ECM_FRONT_END_ACCELERATION_MODE_FAIL_DENIED;
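
	/*
	 * Snapshot the module-wide retry/failure limits for this connection under the IPv6 front end lock.
	 */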
	spin_lock_bh(&ecm_nss_ipv6_lock);
	feci->stats.no_action_seen_limit = ecm_nss_ipv6_no_action_limit_default;
	feci->stats.driver_fail_limit = ecm_nss_ipv6_driver_fail_limit_default;
	feci->stats.ae_nack_limit = ecm_nss_ipv6_nack_limit_default;
	spin_unlock_bh(&ecm_nss_ipv6_lock);

	/*
	 * Copy reference to connection - no need to ref ci as ci maintains a ref to this instance instead (this instance persists for as long as ci does)
	 */
	feci->ci = ci;

	/*
	 * Populate the methods and callbacks
	 */
	feci->ref = ecm_nss_non_ported_ipv6_connection_ref;
	feci->deref = ecm_nss_non_ported_ipv6_connection_deref;
	feci->decelerate = ecm_nss_non_ported_ipv6_connection_decelerate;
	feci->accel_state_get = ecm_nss_non_ported_ipv6_connection_accel_state_get;
	feci->action_seen = ecm_nss_non_ported_ipv6_connection_action_seen;
	feci->accel_ceased = ecm_nss_non_ported_ipv6_connection_accel_ceased;
#ifdef ECM_STATE_OUTPUT_ENABLE
	feci->state_get = ecm_nss_non_ported_ipv6_connection_state_get;
#endif
	feci->ae_interface_number_by_dev_get = ecm_nss_common_get_interface_number_by_dev;

	return nnpci;
}

/*
 * ecm_nss_non_ported_ipv6_process()
 *	Process a protocol that does not have port-based identifiers
 */
unsigned int ecm_nss_non_ported_ipv6_process(struct net_device *out_dev,
							struct net_device *in_dev,
							uint8_t *src_node_addr,
							uint8_t *dest_node_addr,
							bool can_accel, bool is_routed, bool is_l2_encap, struct sk_buff *skb,
							struct ecm_tracker_ip_header *ip_hdr,
							struct nf_conn *ct, ecm_tracker_sender_type_t sender, ecm_db_direction_t ecm_dir,
							struct nf_conntrack_tuple *orig_tuple, struct nf_conntrack_tuple *reply_tuple,
							ip_addr_t ip_src_addr, ip_addr_t ip_dest_addr)
{
	struct ecm_db_connection_instance *ci;
	int protocol;
	int src_port;
	int dest_port;
	ip_addr_t match_addr;
	struct ecm_classifier_instance *assignments[ECM_CLASSIFIER_TYPES];
	int aci_index;
	int assignment_count;
	ecm_db_timer_group_t ci_orig_timer_group;
	struct ecm_classifier_process_response prevalent_pr;

	DEBUG_TRACE("Non-ported protocol src: " ECM_IP_ADDR_OCTAL_FMT ", dest: " ECM_IP_ADDR_OCTAL_FMT "\n",
				ECM_IP_ADDR_TO_OCTAL(ip_src_addr), ECM_IP_ADDR_TO_OCTAL(ip_dest_addr));

	/*
	 * Look up a connection.
	 */
	protocol = (int)orig_tuple->dst.protonum;
	if (protocol == IPPROTO_IPIP) {
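		/*
		 * IPIP carries no transport-layer ports, so both pseudo ports are zero.
		 */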
		src_port = 0;
		dest_port = 0;
	} else {
		/*
		 * Do not accelerate the non-ported connections except the ones we handle.
		 */
		can_accel = false;

		/*
		 * Port numbers are just the negative protocol number equivalents for now.
		 * GGG They could eventually be used as protocol specific identifiers such as ICMP IDs etc.
		 */
		src_port = -protocol;
		dest_port = -protocol;
	}
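	/*
	 * Non-ported flows are keyed in the database by addresses, protocol and the pseudo port numbers derived above.
	 */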
	ci = ecm_db_connection_find_and_ref(ip_src_addr, ip_dest_addr, protocol, src_port, dest_port);

	/*
	 * If there is no existing connection then create a new one.
	 */
	if (unlikely(!ci)) {
		struct ecm_db_mapping_instance *src_mi;
		struct ecm_db_mapping_instance *dest_mi;
		struct ecm_db_node_instance *src_ni;
		struct ecm_db_node_instance *dest_ni;
		struct ecm_classifier_default_instance *dci;
		struct ecm_front_end_connection_instance *feci;
		struct ecm_db_connection_instance *nci;
		ecm_classifier_type_t classifier_type;
		int32_t to_list_first;
		struct ecm_db_iface_instance *to_list[ECM_DB_IFACE_HEIRARCHY_MAX];
		int32_t from_list_first;
		struct ecm_db_iface_instance *from_list[ECM_DB_IFACE_HEIRARCHY_MAX];

		DEBUG_INFO("New connection from " ECM_IP_ADDR_OCTAL_FMT " to " ECM_IP_ADDR_OCTAL_FMT "\n", ECM_IP_ADDR_TO_OCTAL(ip_src_addr), ECM_IP_ADDR_TO_OCTAL(ip_dest_addr));

		/*
		 * Before we attempt to create the connection are we being terminated?
		 */
		spin_lock_bh(&ecm_nss_ipv6_lock);
		if (ecm_nss_ipv6_terminate_pending) {
			spin_unlock_bh(&ecm_nss_ipv6_lock);
			DEBUG_WARN("Terminating\n");

			/*
			 * As we are terminating we just allow the packet to pass - it's no longer our concern
			 */
			return NF_ACCEPT;
		}
		spin_unlock_bh(&ecm_nss_ipv6_lock);

		/*
		 * Does this connection have a conntrack entry?
		 */
		if (ct) {
			unsigned int conn_count;

			/*
			 * If we have exceeded the connection limit (according to conntrack) then abort.
			 * NOTE: Conntrack, when at its limit, will destroy a connection to make way for a new one.
			 * Conntrack won't exceed its limit but ECM can due to it needing to hold connections while
			 * acceleration commands are in-flight.
			 * This means that ECM can 'fall behind' somewhat with the connection state wrt conntrack connection state.
			 * This is not seen as an issue since conntrack will have issued us with a destroy event for the flushed connection(s)
			 * and we will eventually catch up.
			 * Since ECM is capable of handling connections mid-flow ECM will pick up where it can.
			 */
			conn_count = (unsigned int)ecm_db_connection_count_get();
			if (conn_count >= nf_conntrack_max) {
				DEBUG_WARN("ECM Connection count limit reached: db: %u, ct: %u\n", conn_count, nf_conntrack_max);
				return NF_ACCEPT;
			}
		}

		/*
		 * Now allocate the new connection
		 */
		nci = ecm_db_connection_alloc();
		if (!nci) {
			DEBUG_WARN("Failed to allocate connection\n");
			return NF_ACCEPT;
		}

		/*
		 * Connection must have a front end instance associated with it
		 */
		feci = (struct ecm_front_end_connection_instance *)ecm_nss_non_ported_ipv6_connection_instance_alloc(nci, can_accel);
		if (!feci) {
			ecm_db_connection_deref(nci);
			DEBUG_WARN("Failed to allocate front end\n");
			return NF_ACCEPT;
		}

		/*
		 * Get the src and destination mappings.
		 * For this we also need the interface lists which we also set upon the new connection while we are at it.
		 * GGG TODO rework terms of "src/dest" - these need to be named consistently as from/to as per database terms.
		 * GGG TODO The empty list checks should not be needed, mapping_establish_and_ref() should fail out if there is no list anyway.
		 */
		DEBUG_TRACE("%p: Create the 'from' interface hierarchy list\n", nci);
		from_list_first = ecm_interface_heirarchy_construct(feci, from_list, ip_dest_addr, ip_src_addr, 6, protocol, in_dev, is_routed, in_dev, src_node_addr, dest_node_addr, NULL);
		if (from_list_first == ECM_DB_IFACE_HEIRARCHY_MAX) {
			feci->deref(feci);
			ecm_db_connection_deref(nci);
			DEBUG_WARN("Failed to obtain 'from' hierarchy list\n");
			return NF_ACCEPT;
		}
		ecm_db_connection_from_interfaces_reset(nci, from_list, from_list_first);

		DEBUG_TRACE("%p: Create source node\n", nci);
		src_ni = ecm_nss_ipv6_node_establish_and_ref(feci, in_dev, ip_src_addr, from_list, from_list_first, src_node_addr);
		ecm_db_connection_interfaces_deref(from_list, from_list_first);
		if (!src_ni) {
			feci->deref(feci);
			ecm_db_connection_deref(nci);
			DEBUG_WARN("Failed to establish source node\n");
			return NF_ACCEPT;
		}

		DEBUG_TRACE("%p: Create source mapping\n", nci);
		src_mi = ecm_nss_ipv6_mapping_establish_and_ref(ip_src_addr, src_port);
		if (!src_mi) {
			ecm_db_node_deref(src_ni);
			feci->deref(feci);
			ecm_db_connection_deref(nci);
			DEBUG_WARN("Failed to establish src mapping\n");
			return NF_ACCEPT;
		}

		DEBUG_TRACE("%p: Create the 'to' interface hierarchy list\n", nci);
		to_list_first = ecm_interface_heirarchy_construct(feci, to_list, ip_src_addr, ip_dest_addr, 6, protocol, out_dev, is_routed, in_dev, dest_node_addr, src_node_addr, NULL);
		if (to_list_first == ECM_DB_IFACE_HEIRARCHY_MAX) {
			ecm_db_mapping_deref(src_mi);
			ecm_db_node_deref(src_ni);
			feci->deref(feci);
			ecm_db_connection_deref(nci);
			DEBUG_WARN("Failed to obtain 'to' hierarchy list\n");
			return NF_ACCEPT;
		}
		ecm_db_connection_to_interfaces_reset(nci, to_list, to_list_first);

		DEBUG_TRACE("%p: Create dest node\n", nci);
		dest_ni = ecm_nss_ipv6_node_establish_and_ref(feci, out_dev, ip_dest_addr, to_list, to_list_first, dest_node_addr);
		ecm_db_connection_interfaces_deref(to_list, to_list_first);
		if (!dest_ni) {
			ecm_db_mapping_deref(src_mi);
			ecm_db_node_deref(src_ni);
			feci->deref(feci);
			ecm_db_connection_deref(nci);
			DEBUG_WARN("Failed to establish dest node\n");
			return NF_ACCEPT;
		}

		DEBUG_TRACE("%p: Create dest mapping\n", nci);
		dest_mi = ecm_nss_ipv6_mapping_establish_and_ref(ip_dest_addr, dest_port);
		if (!dest_mi) {
			ecm_db_node_deref(dest_ni);
			ecm_db_mapping_deref(src_mi);
			ecm_db_node_deref(src_ni);
			feci->deref(feci);
			ecm_db_connection_deref(nci);
			DEBUG_WARN("Failed to establish dest mapping\n");
			return NF_ACCEPT;
		}

		/*
		 * Every connection also needs a default classifier
		 */
		dci = ecm_classifier_default_instance_alloc(nci, protocol, ecm_dir, src_port, dest_port);
		if (!dci) {
			ecm_db_mapping_deref(dest_mi);
			ecm_db_node_deref(dest_ni);
			ecm_db_mapping_deref(src_mi);
			ecm_db_node_deref(src_ni);
			feci->deref(feci);
			ecm_db_connection_deref(nci);
			DEBUG_WARN("Failed to allocate default classifier\n");
			return NF_ACCEPT;
		}
		ecm_db_connection_classifier_assign(nci, (struct ecm_classifier_instance *)dci);

		/*
		 * Every connection starts with a full complement of classifiers assigned.
		 * NOTE: Default classifier is a special case considered previously
		 */
		for (classifier_type = ECM_CLASSIFIER_TYPE_DEFAULT + 1; classifier_type < ECM_CLASSIFIER_TYPES; ++classifier_type) {
			struct ecm_classifier_instance *aci = ecm_nss_ipv6_assign_classifier(nci, classifier_type);
			if (aci) {
				aci->deref(aci);
			} else {
				dci->base.deref((struct ecm_classifier_instance *)dci);
				ecm_db_mapping_deref(dest_mi);
				ecm_db_node_deref(dest_ni);
				ecm_db_mapping_deref(src_mi);
				ecm_db_node_deref(src_ni);
				feci->deref(feci);
				ecm_db_connection_deref(nci);
				DEBUG_WARN("Failed to allocate classifier assignments\n");
				return NF_ACCEPT;
			}
		}

		/*
		 * Now add the connection into the database.
		 * NOTE: In an SMP situation such as ours there is a possibility that more than one packet for the same
		 * connection is being processed simultaneously.
		 * We *could* end up creating more than one connection instance for the same actual connection.
		 * To guard against this we now perform a mutex'd lookup of the connection + add once more - another cpu may have created it before us.
		 */
		spin_lock_bh(&ecm_nss_ipv6_lock);
		ci = ecm_db_connection_find_and_ref(ip_src_addr, ip_dest_addr, protocol, src_port, dest_port);
		if (ci) {
			/*
			 * Another cpu created the same connection before us - use the one we just found
			 */
			spin_unlock_bh(&ecm_nss_ipv6_lock);
			ecm_db_connection_deref(nci);
		} else {
			struct ecm_tracker_instance *ti;
			ecm_db_timer_group_t tg;
			ecm_tracker_sender_state_t src_state;
			ecm_tracker_sender_state_t dest_state;
			ecm_tracker_connection_state_t state;

			/*
			 * Ask tracker for timer group to set the connection to initially.
			 */
			ti = dci->tracker_get_and_ref(dci);
			ti->state_get(ti, &src_state, &dest_state, &state, &tg);
			ti->deref(ti);

			/*
			 * Add the new connection we created into the database
			 * NOTE: assign to a short timer group for now - it is the assigned classifiers' responsibility to do this
			 */
			ecm_db_connection_add(nci, feci, src_mi, dest_mi, src_mi, dest_mi,
					src_ni, dest_ni, src_ni, dest_ni,
					6, protocol, ecm_dir,
					NULL /* final callback */,
					ecm_nss_non_ported_ipv6_connection_defunct_callback,
					tg, is_routed, nci);

			spin_unlock_bh(&ecm_nss_ipv6_lock);

			ci = nci;
			DEBUG_INFO("%p: New Non-ported protocol %d connection created\n", ci, protocol);
		}

		/*
		 * No longer need references to the objects we created
		 */
		dci->base.deref((struct ecm_classifier_instance *)dci);
		ecm_db_mapping_deref(dest_mi);
		ecm_db_node_deref(dest_ni);
		ecm_db_mapping_deref(src_mi);
		ecm_db_node_deref(src_ni);
		feci->deref(feci);
	}

	/*
	 * Keep connection alive as we have seen activity
	 */
	if (!ecm_db_connection_defunct_timer_touch(ci)) {
		ecm_db_connection_deref(ci);
		return NF_ACCEPT;
	}

	/*
	 * Identify which side of the connection is sending.
	 * NOTE: This may differ from the 'sender' we were passed, depending on the
	 * orientation of the connection we have located.
	 */
	ecm_db_connection_from_address_get(ci, match_addr);
	if (ECM_IP_ADDR_MATCH(ip_src_addr, match_addr)) {
		sender = ECM_TRACKER_SENDER_TYPE_SRC;
	} else {
		sender = ECM_TRACKER_SENDER_TYPE_DEST;
	}

	/*
	 * Do we need to action generation change?
	 */
	if (unlikely(ecm_db_connection_regeneration_required_check(ci))) {
		ecm_nss_ipv6_connection_regenerate(ci, sender, out_dev, in_dev);
	}

	/*
	 * Iterate the assignments and call to process!
	 * Policy implemented:
	 * 1. Classifiers that say they are not relevant are unassigned and not actioned further.
	 * 2. Any drop command from any classifier is honoured.
	 * 3. Accel is only permitted for the non-ported protocols we explicitly handle (can_accel is forced false for all others); any classifier may still deny it.
	 * 4. Only the highest priority classifier, that actions it, will have its qos tag honoured.
	 * 5. Only the highest priority classifier, that actions it, will have its timer group honoured.
	 */
	DEBUG_TRACE("%p: process begin, skb: %p\n", ci, skb);
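	/*
	 * Start from permissive defaults - no drop, acceleration allowed, current skb priority
	 * and current timer group - and let each assigned classifier tighten them below.
	 */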
	prevalent_pr.process_actions = 0;
	prevalent_pr.drop = false;
	prevalent_pr.flow_qos_tag = skb->priority;
	prevalent_pr.return_qos_tag = skb->priority;
	prevalent_pr.accel_mode = ECM_CLASSIFIER_ACCELERATION_MODE_ACCEL;
	prevalent_pr.timer_group = ci_orig_timer_group = ecm_db_connection_timer_group_get(ci);

	assignment_count = ecm_db_connection_classifier_assignments_get_and_ref(ci, assignments);
	for (aci_index = 0; aci_index < assignment_count; ++aci_index) {
		struct ecm_classifier_process_response aci_pr;
		struct ecm_classifier_instance *aci;

		aci = assignments[aci_index];
		DEBUG_TRACE("%p: process: %p, type: %d\n", ci, aci, aci->type_get(aci));
		aci->process(aci, sender, ip_hdr, skb, &aci_pr);
		DEBUG_TRACE("%p: aci_pr: process actions: %x, became relevant: %u, relevance: %d, drop: %d, "
				"flow_qos_tag: %u, return_qos_tag: %u, accel_mode: %x, timer_group: %d\n",
				ci, aci_pr.process_actions, aci_pr.became_relevant, aci_pr.relevance, aci_pr.drop,
				aci_pr.flow_qos_tag, aci_pr.return_qos_tag, aci_pr.accel_mode, aci_pr.timer_group);

		if (aci_pr.relevance == ECM_CLASSIFIER_RELEVANCE_NO) {
			ecm_classifier_type_t aci_type;

			/*
			 * This classifier can be unassigned - PROVIDED it is not the default classifier
			 */
			aci_type = aci->type_get(aci);
			if (aci_type == ECM_CLASSIFIER_TYPE_DEFAULT) {
				continue;
			}

			DEBUG_INFO("%p: Classifier not relevant, unassign: %d\n", ci, aci_type);
			ecm_db_connection_classifier_unassign(ci, aci);
			continue;
		}

		/*
		 * Yes or Maybe relevant.
		 */
		if (aci_pr.process_actions & ECM_CLASSIFIER_PROCESS_ACTION_DROP) {
			/*
			 * Drop command from any classifier is actioned.
			 */
			DEBUG_TRACE("%p: wants drop: %p, type: %d, skb: %p\n", ci, aci, aci->type_get(aci), skb);
			prevalent_pr.drop |= aci_pr.drop;
		}

		/*
		 * Accel mode permission
		 */
		if (aci_pr.relevance == ECM_CLASSIFIER_RELEVANCE_MAYBE) {
			/*
			 * Classifier not sure of its relevance - cannot accel yet
			 */
			DEBUG_TRACE("%p: accel denied by maybe: %p, type: %d\n", ci, aci, aci->type_get(aci));
			prevalent_pr.accel_mode = ECM_CLASSIFIER_ACCELERATION_MODE_NO;
		} else {
			if (aci_pr.process_actions & ECM_CLASSIFIER_PROCESS_ACTION_ACCEL_MODE) {
				if (aci_pr.accel_mode == ECM_CLASSIFIER_ACCELERATION_MODE_NO) {
					DEBUG_TRACE("%p: accel denied: %p, type: %d\n", ci, aci, aci->type_get(aci));
					prevalent_pr.accel_mode = ECM_CLASSIFIER_ACCELERATION_MODE_NO;
				}
				/* else yes or don't care about accel */
			}
		}

		/*
		 * Timer group (the last classifier i.e. the highest priority one) will 'win'
		 */
		if (aci_pr.process_actions & ECM_CLASSIFIER_PROCESS_ACTION_TIMER_GROUP) {
			DEBUG_TRACE("%p: timer group: %p, type: %d, group: %d\n", ci, aci, aci->type_get(aci), aci_pr.timer_group);
			prevalent_pr.timer_group = aci_pr.timer_group;
		}

		/*
		 * Qos tag (the last classifier i.e. the highest priority one) will 'win'
		 */
		if (aci_pr.process_actions & ECM_CLASSIFIER_PROCESS_ACTION_QOS_TAG) {
			DEBUG_TRACE("%p: aci: %p, type: %d, flow qos tag: %u, return qos tag: %u\n",
					ci, aci, aci->type_get(aci), aci_pr.flow_qos_tag, aci_pr.return_qos_tag);
			prevalent_pr.flow_qos_tag = aci_pr.flow_qos_tag;
			prevalent_pr.return_qos_tag = aci_pr.return_qos_tag;
		}

#ifdef ECM_CLASSIFIER_DSCP_ENABLE
		/*
		 * If any classifier denied DSCP remarking then that overrides every classifier
		 */
		if (aci_pr.process_actions & ECM_CLASSIFIER_PROCESS_ACTION_DSCP_DENY) {
			DEBUG_TRACE("%p: aci: %p, type: %d, DSCP remark denied\n",
					ci, aci, aci->type_get(aci));
			prevalent_pr.process_actions |= ECM_CLASSIFIER_PROCESS_ACTION_DSCP_DENY;
			prevalent_pr.process_actions &= ~ECM_CLASSIFIER_PROCESS_ACTION_DSCP;
		}

		/*
		 * DSCP remark action, but only if it has not been denied by any classifier
		 */
		if (aci_pr.process_actions & ECM_CLASSIFIER_PROCESS_ACTION_DSCP) {
			if (!(prevalent_pr.process_actions & ECM_CLASSIFIER_PROCESS_ACTION_DSCP_DENY)) {
				DEBUG_TRACE("%p: aci: %p, type: %d, DSCP remark wanted, flow_dscp: %u, return dscp: %u\n",
						ci, aci, aci->type_get(aci), aci_pr.flow_dscp, aci_pr.return_dscp);
				prevalent_pr.process_actions |= ECM_CLASSIFIER_PROCESS_ACTION_DSCP;
				prevalent_pr.flow_dscp = aci_pr.flow_dscp;
				prevalent_pr.return_dscp = aci_pr.return_dscp;
			}
		}
#endif
	}
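	/*
	 * Release the references taken on the assigned classifiers above.
	 */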
	ecm_db_connection_assignments_release(assignment_count, assignments);

	/*
	 * Change timer group?
	 */
	if (ci_orig_timer_group != prevalent_pr.timer_group) {
		DEBUG_TRACE("%p: change timer group from: %d to: %d\n", ci, ci_orig_timer_group, prevalent_pr.timer_group);
		ecm_db_connection_defunct_timer_reset(ci, prevalent_pr.timer_group);
	}

	/*
	 * Drop?
	 */
	if (prevalent_pr.drop) {
		DEBUG_TRACE("%p: drop: %p\n", ci, skb);
		ecm_db_connection_data_totals_update_dropped(ci, (sender == ECM_TRACKER_SENDER_TYPE_SRC)? true : false, skb->len, 1);
		ecm_db_connection_deref(ci);
		return NF_ACCEPT;
	}
	ecm_db_connection_data_totals_update(ci, (sender == ECM_TRACKER_SENDER_TYPE_SRC)? true : false, skb->len, 1);

	/*
	 * Assign qos tag
	 * GGG TODO Should we use sender to identify whether to use flow or return qos tag?
	 */
	skb->priority = prevalent_pr.flow_qos_tag;
	DEBUG_TRACE("%p: skb priority: %u\n", ci, skb->priority);

	/*
	 * Accelerate?
	 */
	if (prevalent_pr.accel_mode == ECM_CLASSIFIER_ACCELERATION_MODE_ACCEL) {
		struct ecm_front_end_connection_instance *feci;
		DEBUG_TRACE("%p: accel\n", ci);
		feci = ecm_db_connection_front_end_get_and_ref(ci);
		ecm_nss_non_ported_ipv6_connection_accelerate(feci, &prevalent_pr, is_l2_encap);
		feci->deref(feci);
	}
	ecm_db_connection_deref(ci);

	return NF_ACCEPT;
}

/*
 * ecm_nss_non_ported_ipv6_debugfs_init()
 */
bool ecm_nss_non_ported_ipv6_debugfs_init(struct dentry *dentry)
{
	if (!debugfs_create_u32("non_ported_accelerated_count", S_IRUGO, dentry,
					(u32 *)&ecm_nss_non_ported_ipv6_accelerated_count)) {
		DEBUG_ERROR("Failed to create ecm nss ipv6 non_ported_accelerated_count file in debugfs\n");
		return false;
	}

	return true;
}
