1/*
2 **************************************************************************
3 * Copyright (c) 2015 The Linux Foundation.  All rights reserved.
4 * Permission to use, copy, modify, and/or distribute this software for
5 * any purpose with or without fee is hereby granted, provided that the
6 * above copyright notice and this permission notice appear in all copies.
7 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
8 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
9 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
10 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
11 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
12 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
13 * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
14 **************************************************************************
15 */
16
17#include <linux/version.h>
18#include <linux/types.h>
19#include <linux/ip.h>
20#include <linux/tcp.h>
21#include <linux/module.h>
22#include <linux/skbuff.h>
23#include <linux/icmp.h>
24#include <linux/debugfs.h>
25#include <linux/kthread.h>
26#include <linux/pkt_sched.h>
27#include <linux/string.h>
28#include <net/ip6_route.h>
29#include <net/ip6_fib.h>
30#include <net/addrconf.h>
31#include <net/ipv6.h>
32#include <net/tcp.h>
33#include <asm/unaligned.h>
34#include <asm/uaccess.h>	/* for put_user */
35#include <net/ipv6.h>
36#include <linux/inet.h>
37#include <linux/in6.h>
38#include <linux/udp.h>
39#include <linux/tcp.h>
40#include <linux/inetdevice.h>
41#include <linux/if_arp.h>
42#include <linux/netfilter_ipv6.h>
43#include <linux/netfilter_bridge.h>
44#include <linux/if_bridge.h>
45#include <net/arp.h>
46#include <net/netfilter/nf_conntrack.h>
47#include <net/netfilter/nf_conntrack_acct.h>
48#include <net/netfilter/nf_conntrack_helper.h>
49#include <net/netfilter/nf_conntrack_l4proto.h>
50#include <net/netfilter/nf_conntrack_l3proto.h>
51#include <net/netfilter/nf_conntrack_zones.h>
52#include <net/netfilter/nf_conntrack_core.h>
53#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
54#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
55#ifdef ECM_INTERFACE_VLAN_ENABLE
56#include <linux/../../net/8021q/vlan.h>
57#include <linux/if_vlan.h>
58#endif
59
60/*
61 * Debug output levels
62 * 0 = OFF
63 * 1 = ASSERTS / ERRORS
64 * 2 = 1 + WARN
65 * 3 = 2 + INFO
66 * 4 = 3 + TRACE
67 */
68#define DEBUG_LEVEL ECM_SFE_PORTED_IPV6_DEBUG_LEVEL
69
70#include <sfe_drv.h>
71
72#include "ecm_types.h"
73#include "ecm_db_types.h"
74#include "ecm_state.h"
75#include "ecm_tracker.h"
76#include "ecm_classifier.h"
77#include "ecm_front_end_types.h"
78#include "ecm_tracker_datagram.h"
79#include "ecm_tracker_udp.h"
80#include "ecm_tracker_tcp.h"
81#include "ecm_db.h"
82#include "ecm_classifier_default.h"
83#include "ecm_interface.h"
84#include "ecm_sfe_ported_ipv6.h"
85#include "ecm_sfe_ipv6.h"
86#include "ecm_sfe_common.h"
87
88/*
89 * Magic numbers
90 */
91#define ECM_SFE_PORTED_IPV6_CONNECTION_INSTANCE_MAGIC 0xEB9A
92
93/*
94 * Protocol type that ported file supports.
95 */
96enum ecm_sfe_ported_ipv6_proto_types {
97	ECM_SFE_PORTED_IPV6_PROTO_TCP = 0,
98	ECM_SFE_PORTED_IPV6_PROTO_UDP,
99	ECM_SFE_PORTED_IPV6_PROTO_MAX
100
101};
102
103/*
104 * struct ecm_sfe_ipv6_ported_connection_instance
105 *	A connection specific front end instance for PORTED connections
106 */
107struct ecm_sfe_ported_ipv6_connection_instance {
108	struct ecm_front_end_connection_instance base;		/* Base class */
109	uint8_t ported_accelerated_count_index;			/* Index value of accelerated count array (UDP or TCP) */
110#if (DEBUG_LEVEL > 0)
111	uint16_t magic;
112#endif
113};
114
115static int ecm_sfe_ported_ipv6_accelerated_count[ECM_SFE_PORTED_IPV6_PROTO_MAX] = {0};
116						/* Array of Number of TCP and UDP connections currently offloaded */
117
118/*
119 * Expose what should be a static flag in the TCP connection tracker.
120 */
121#ifdef ECM_OPENWRT_SUPPORT
122extern int nf_ct_tcp_no_window_check;
123#endif
124extern int nf_ct_tcp_be_liberal;
125
126/*
127 * ecm_sfe_ported_ipv6_connection_callback()
128 *	Callback for handling create ack/nack calls.
129 */
130static void ecm_sfe_ported_ipv6_connection_callback(void *app_data, struct sfe_ipv6_msg *nim)
131{
132	struct sfe_ipv6_rule_create_msg *nircm = &nim->msg.rule_create;
133	uint32_t serial = (uint32_t)app_data;
134	struct ecm_db_connection_instance *ci;
135	struct ecm_front_end_connection_instance *feci;
136	struct ecm_sfe_ported_ipv6_connection_instance *npci;
137	ip_addr_t flow_ip;
138	ip_addr_t return_ip;
139	ecm_front_end_acceleration_mode_t result_mode;
140
141	/*
142	 * Is this a response to a create message?
143	 */
144	if (nim->cm.type != SFE_TX_CREATE_RULE_MSG) {
145		DEBUG_ERROR("%p: ported create callback with improper type: %d, serial: %u\n", nim, nim->cm.type, serial);
146		return;
147	}
148
149	/*
150	 * Look up ecm connection so that we can update the status.
151	 */
152	ci = ecm_db_connection_serial_find_and_ref(serial);
153	if (!ci) {
154		DEBUG_TRACE("%p: create callback, connection not found, serial: %u\n", nim, serial);
155		return;
156	}
157
158	/*
159	 * Release ref held for this ack/nack response.
160	 * NOTE: It's okay to do this here, ci won't go away, because the ci is held as
161	 * a result of the ecm_db_connection_serial_find_and_ref()
162	 */
163	ecm_db_connection_deref(ci);
164
165	/*
166	 * Get the front end instance
167	 */
168	feci = ecm_db_connection_front_end_get_and_ref(ci);
169	npci = (struct ecm_sfe_ported_ipv6_connection_instance *)feci;
170	DEBUG_CHECK_MAGIC(npci, ECM_SFE_PORTED_IPV6_CONNECTION_INSTANCE_MAGIC, "%p: magic failed", npci);
171
172	ECM_SFE_IPV6_ADDR_TO_IP_ADDR(flow_ip, nircm->tuple.flow_ip);
173	ECM_SFE_IPV6_ADDR_TO_IP_ADDR(return_ip, nircm->tuple.return_ip);
174
175	/*
176	 * Record command duration
177	 */
178	ecm_sfe_ipv6_accel_done_time_update(feci);
179
180	/*
181	 * Dump some useful trace information.
182	 */
183	DEBUG_TRACE("%p: accelerate response for connection: %p, serial: %u\n", npci, feci->ci, serial);
184	DEBUG_TRACE("%p: rule_flags: %x, valid_flags: %x\n", npci, nircm->rule_flags, nircm->valid_flags);
185	DEBUG_TRACE("%p: flow_ip: " ECM_IP_ADDR_OCTAL_FMT ":%d\n", npci, ECM_IP_ADDR_TO_OCTAL(flow_ip), nircm->tuple.flow_ident);
186	DEBUG_TRACE("%p: return_ip: " ECM_IP_ADDR_OCTAL_FMT ":%d\n", npci, ECM_IP_ADDR_TO_OCTAL(return_ip), nircm->tuple.return_ident);
187	DEBUG_TRACE("%p: protocol: %d\n", npci, nircm->tuple.protocol);
188
189	/*
190	 * Handle the creation result code.
191	 */
192	DEBUG_TRACE("%p: response: %d\n", npci, nim->cm.response);
193	if (nim->cm.response != SFE_CMN_RESPONSE_ACK) {
194		/*
195		 * Creation command failed (specific reason ignored).
196		 */
197		DEBUG_TRACE("%p: accel nack: %d\n", npci, nim->cm.error);
198		spin_lock_bh(&feci->lock);
199		DEBUG_ASSERT(feci->accel_mode == ECM_FRONT_END_ACCELERATION_MODE_ACCEL_PENDING, "%p: Unexpected mode: %d\n", ci, feci->accel_mode);
200		feci->stats.ae_nack++;
201		feci->stats.ae_nack_total++;
202		if (feci->stats.ae_nack >= feci->stats.ae_nack_limit) {
203			/*
204			 * Too many SFE rejections
205			 */
206			result_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_ACCEL_ENGINE;
207		} else {
208			/*
209			 * Revert to decelerated
210			 */
211			result_mode = ECM_FRONT_END_ACCELERATION_MODE_DECEL;
212		}
213
214		/*
215		 * If connection is now defunct then set mode to ensure no further accel attempts occur
216		 */
217		if (feci->is_defunct) {
218			result_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DEFUNCT;
219		}
220
221		spin_lock_bh(&ecm_sfe_ipv6_lock);
222		_ecm_sfe_ipv6_accel_pending_clear(feci, result_mode);
223		spin_unlock_bh(&ecm_sfe_ipv6_lock);
224
225		spin_unlock_bh(&feci->lock);
226
227		/*
228		 * Release the connection.
229		 */
230		feci->deref(feci);
231		ecm_db_connection_deref(ci);
232		return;
233	}
234
235	spin_lock_bh(&feci->lock);
236	DEBUG_ASSERT(feci->accel_mode == ECM_FRONT_END_ACCELERATION_MODE_ACCEL_PENDING, "%p: Unexpected mode: %d\n", ci, feci->accel_mode);
237
238	/*
239	 * If a flush occured before we got the ACK then our acceleration was effectively cancelled on us
240	 * GGG TODO This is a workaround for a SFE message OOO quirk, this should eventually be removed.
241	 */
242	if (feci->stats.flush_happened) {
243		feci->stats.flush_happened = false;
244
245		/*
246		 * Increment the no-action counter.  Our connection was decelerated on us with no action occurring.
247		 */
248		feci->stats.no_action_seen++;
249
250		spin_lock_bh(&ecm_sfe_ipv6_lock);
251		_ecm_sfe_ipv6_accel_pending_clear(feci, ECM_FRONT_END_ACCELERATION_MODE_DECEL);
252		spin_unlock_bh(&ecm_sfe_ipv6_lock);
253
254		spin_unlock_bh(&feci->lock);
255
256		/*
257		 * Release the connection.
258		 */
259		feci->deref(feci);
260		ecm_db_connection_deref(ci);
261		return;
262	}
263
264	/*
265	 * Create succeeded
266	 */
267
268	/*
269	 * Clear any nack count
270	 */
271	feci->stats.ae_nack = 0;
272
273	/*
274	 * Clear the "accelerate pending" state and move to "accelerated" state bumping
275	 * the accelerated counters to match our new state.
276	 *
277	 * Decelerate may have been attempted while we were "pending accel" and
278	 * this function will return true if that was the case.
279	 * If decelerate was pending then we need to begin deceleration :-(
280	 */
281	spin_lock_bh(&ecm_sfe_ipv6_lock);
282
283	ecm_sfe_ported_ipv6_accelerated_count[npci->ported_accelerated_count_index]++;	/* Protocol specific counter */
284	ecm_sfe_ipv6_accelerated_count++;		/* General running counter */
285
286	if (!_ecm_sfe_ipv6_accel_pending_clear(feci, ECM_FRONT_END_ACCELERATION_MODE_ACCEL)) {
287		/*
288		 * Increment the no-action counter, this is reset if offload action is seen
289		 */
290		feci->stats.no_action_seen++;
291
292		spin_unlock_bh(&ecm_sfe_ipv6_lock);
293		spin_unlock_bh(&feci->lock);
294
295		/*
296		 * Release the connection.
297		 */
298		feci->deref(feci);
299		ecm_db_connection_deref(ci);
300		return;
301	}
302
303	DEBUG_INFO("%p: Decelerate was pending\n", ci);
304
305	spin_unlock_bh(&ecm_sfe_ipv6_lock);
306	spin_unlock_bh(&feci->lock);
307
308	feci->decelerate(feci);
309
310	/*
311	 * Release the connection.
312	 */
313	feci->deref(feci);
314	ecm_db_connection_deref(ci);
315}
316
317/*
318 * ecm_sfe_ported_ipv6_connection_accelerate()
319 *	Accelerate a connection
320 */
321static void ecm_sfe_ported_ipv6_connection_accelerate(struct ecm_front_end_connection_instance *feci,
322									struct ecm_classifier_process_response *pr,
323									struct nf_conn *ct, bool is_l2_encap)
324{
325	struct ecm_sfe_ported_ipv6_connection_instance *npci = (struct ecm_sfe_ported_ipv6_connection_instance *)feci;
326	uint16_t regen_occurrances;
327	int protocol;
328	int32_t from_ifaces_first;
329	int32_t to_ifaces_first;
330	struct ecm_db_iface_instance *from_ifaces[ECM_DB_IFACE_HEIRARCHY_MAX];
331	struct ecm_db_iface_instance *to_ifaces[ECM_DB_IFACE_HEIRARCHY_MAX];
332	struct ecm_db_iface_instance *from_sfe_iface;
333	struct ecm_db_iface_instance *to_sfe_iface;
334	int32_t from_sfe_iface_id;
335	int32_t to_sfe_iface_id;
336	uint8_t from_sfe_iface_address[ETH_ALEN];
337	uint8_t to_sfe_iface_address[ETH_ALEN];
338	struct sfe_ipv6_msg nim;
339	struct sfe_ipv6_rule_create_msg *nircm;
340	struct ecm_classifier_instance *assignments[ECM_CLASSIFIER_TYPES];
341	int aci_index;
342	int assignment_count;
343	sfe_tx_status_t sfe_tx_status;
344	int32_t list_index;
345	int32_t interface_type_counts[ECM_DB_IFACE_TYPE_COUNT];
346	bool rule_invalid;
347	ip_addr_t src_ip;
348	ip_addr_t dest_ip;
349	ecm_front_end_acceleration_mode_t result_mode;
350
351	DEBUG_CHECK_MAGIC(npci, ECM_SFE_PORTED_IPV6_CONNECTION_INSTANCE_MAGIC, "%p: magic failed", npci);
352
353	/*
354	 * Get the re-generation occurrance counter of the connection.
355	 * We compare it again at the end - to ensure that the rule construction has seen no generation
356	 * changes during rule creation.
357	 */
358	regen_occurrances = ecm_db_connection_regeneration_occurrances_get(feci->ci);
359
360	/*
361	 * Test if acceleration is permitted
362	 */
363	if (!ecm_sfe_ipv6_accel_pending_set(feci)) {
364		DEBUG_TRACE("%p: Acceleration not permitted: %p\n", feci, feci->ci);
365		return;
366	}
367
368	/*
369	 * Okay construct an accel command.
370	 * Initialise creation structure.
371	 * NOTE: We leverage the app_data void pointer to be our 32 bit connection serial number.
372	 * When we get it back we re-cast it to a uint32 and do a faster connection lookup.
373	 */
374	memset(&nim, 0, sizeof(struct sfe_ipv6_msg));
375	sfe_ipv6_msg_init(&nim, SFE_SPECIAL_INTERFACE_IPV6, SFE_TX_CREATE_RULE_MSG,
376			sizeof(struct sfe_ipv6_rule_create_msg),
377			ecm_sfe_ported_ipv6_connection_callback,
378			(void *)ecm_db_connection_serial_get(feci->ci));
379
380	nircm = &nim.msg.rule_create;
381	nircm->valid_flags = 0;
382	nircm->rule_flags = 0;
383
384	/*
385	 * Initialize VLAN tag information
386	 */
387	nircm->vlan_primary_rule.ingress_vlan_tag = SFE_VLAN_ID_NOT_CONFIGURED;
388	nircm->vlan_primary_rule.egress_vlan_tag = SFE_VLAN_ID_NOT_CONFIGURED;
389	nircm->vlan_secondary_rule.ingress_vlan_tag = SFE_VLAN_ID_NOT_CONFIGURED;
390	nircm->vlan_secondary_rule.egress_vlan_tag = SFE_VLAN_ID_NOT_CONFIGURED;
391
392	/*
393	 * Get the interface lists of the connection, we must have at least one interface in the list to continue
394	 */
395	from_ifaces_first = ecm_db_connection_from_interfaces_get_and_ref(feci->ci, from_ifaces);
396	if (from_ifaces_first == ECM_DB_IFACE_HEIRARCHY_MAX) {
397		DEBUG_WARN("%p: Accel attempt failed - no interfaces in from_interfaces list!\n", npci);
398		goto ported_accel_bad_rule;
399	}
400
401	to_ifaces_first = ecm_db_connection_to_interfaces_get_and_ref(feci->ci, to_ifaces);
402	if (to_ifaces_first == ECM_DB_IFACE_HEIRARCHY_MAX) {
403		DEBUG_WARN("%p: Accel attempt failed - no interfaces in to_interfaces list!\n", npci);
404		ecm_db_connection_interfaces_deref(from_ifaces, from_ifaces_first);
405		goto ported_accel_bad_rule;
406	}
407
408	/*
409	 * First interface in each must be a known sfe interface
410	 */
411	from_sfe_iface = from_ifaces[from_ifaces_first];
412	to_sfe_iface = to_ifaces[to_ifaces_first];
413	from_sfe_iface_id = ecm_db_iface_ae_interface_identifier_get(from_sfe_iface);
414	to_sfe_iface_id = ecm_db_iface_ae_interface_identifier_get(to_sfe_iface);
415	if ((from_sfe_iface_id < 0) || (to_sfe_iface_id < 0)) {
416		DEBUG_TRACE("%p: from_sfe_iface_id: %d, to_sfe_iface_id: %d\n", npci, from_sfe_iface_id, to_sfe_iface_id);
417		ecm_db_connection_interfaces_deref(from_ifaces, from_ifaces_first);
418		ecm_db_connection_interfaces_deref(to_ifaces, to_ifaces_first);
419		goto ported_accel_bad_rule;
420	}
421
422	/*
423	 * New rule being created
424	 */
425	nircm->valid_flags |= SFE_RULE_CREATE_CONN_VALID;
426
427	/*
428	 * Set interface numbers involved in accelerating this connection.
429	 * These are the outer facing addresses from the heirarchy interface lists we got above.
430	 * These may be overridden later if we detect special interface types e.g. ipsec.
431	 */
432	nircm->conn_rule.flow_interface_num = from_sfe_iface_id;
433	nircm->conn_rule.return_interface_num = to_sfe_iface_id;
434
435	/*
436	 * Set interface numbers involved in accelerating this connection.
437	 * These are the inner facing addresses from the heirarchy interface lists we got above.
438	 */
439	nim.msg.rule_create.conn_rule.flow_top_interface_num = ecm_db_iface_interface_identifier_get(from_ifaces[ECM_DB_IFACE_HEIRARCHY_MAX-1]);
440	nim.msg.rule_create.conn_rule.return_top_interface_num = ecm_db_iface_interface_identifier_get(to_ifaces[ECM_DB_IFACE_HEIRARCHY_MAX-1]);
441
442	/*
443	 * We know that each outward facing interface is known to the SFE and so this connection could be accelerated.
444	 * However the lists may also specify other interesting details that must be included in the creation command,
445	 * for example, ethernet MAC, VLAN tagging or PPPoE session information.
446	 * We get this information by walking from the outer to the innermost interface for each list and examine the interface types.
447	 *
448	 * Start with the 'from' (src) side.
449	 * NOTE: The lists may contain a complex heirarchy of similar type of interface e.g. multiple vlans or tunnels within tunnels.
450	 * This SFE cannot handle that - there is no way to describe this in the rule - if we see multiple types that would conflict we have to abort.
451	 */
452	DEBUG_TRACE("%p: Examine from/src heirarchy list\n", npci);
453	memset(interface_type_counts, 0, sizeof(interface_type_counts));
454	rule_invalid = false;
455	for (list_index = from_ifaces_first; !rule_invalid && (list_index < ECM_DB_IFACE_HEIRARCHY_MAX); list_index++) {
456		struct ecm_db_iface_instance *ii;
457		ecm_db_iface_type_t ii_type;
458		char *ii_name;
459
460		ii = from_ifaces[list_index];
461		ii_type = ecm_db_connection_iface_type_get(ii);
462		ii_name = ecm_db_interface_type_to_string(ii_type);
463		DEBUG_TRACE("%p: list_index: %d, ii: %p, type: %d (%s)\n", npci, list_index, ii, ii_type, ii_name);
464
465		/*
466		 * Extract information from this interface type if it is applicable to the rule.
467		 * Conflicting information may cause accel to be unsupported.
468		 */
469		switch (ii_type) {
470#ifdef ECM_INTERFACE_PPP_ENABLE
471			struct ecm_db_interface_info_pppoe pppoe_info;
472#endif
473#ifdef ECM_INTERFACE_VLAN_ENABLE
474			struct ecm_db_interface_info_vlan vlan_info;
475			uint32_t vlan_value = 0;
476			struct net_device *vlan_in_dev = NULL;
477#endif
478		case ECM_DB_IFACE_TYPE_BRIDGE:
479			DEBUG_TRACE("%p: Bridge\n", npci);
480			if (interface_type_counts[ii_type] != 0) {
481				/*
482				 * Cannot cascade bridges
483				 */
484				rule_invalid = true;
485				DEBUG_TRACE("%p: Bridge - ignore additional\n", npci);
486				break;
487			}
488			ecm_db_iface_bridge_address_get(ii, from_sfe_iface_address);
489			DEBUG_TRACE("%p: Bridge - mac: %pM\n", npci, from_sfe_iface_address);
490			break;
491		case ECM_DB_IFACE_TYPE_ETHERNET:
492			DEBUG_TRACE("%p: Ethernet\n", npci);
493			if (interface_type_counts[ii_type] != 0) {
494				/*
495				 * Ignore additional mac addresses, these are usually as a result of address propagation
496				 * from bridges down to ports etc.
497				 */
498				DEBUG_TRACE("%p: Ethernet - ignore additional\n", npci);
499				break;
500			}
501
502			/*
503			 * Can only handle one MAC, the first outermost mac.
504			 */
505			ecm_db_iface_ethernet_address_get(ii, from_sfe_iface_address);
506			DEBUG_TRACE("%p: Ethernet - mac: %pM\n", npci, from_sfe_iface_address);
507			break;
508		case ECM_DB_IFACE_TYPE_PPPOE:
509#ifdef ECM_INTERFACE_PPP_ENABLE
510			/*
511			 * More than one PPPoE in the list is not valid!
512			 */
513			if (interface_type_counts[ii_type] != 0) {
514				DEBUG_TRACE("%p: PPPoE - additional unsupported\n", npci);
515				rule_invalid = true;
516				break;
517			}
518
519			/*
520			 * Copy pppoe session info to the creation structure.
521			 */
522			ecm_db_iface_pppoe_session_info_get(ii, &pppoe_info);
523
524			nircm->pppoe_rule.flow_pppoe_session_id = pppoe_info.pppoe_session_id;
525			memcpy(nircm->pppoe_rule.flow_pppoe_remote_mac, pppoe_info.remote_mac, ETH_ALEN);
526			nircm->valid_flags |= SFE_RULE_CREATE_PPPOE_VALID;
527
528			DEBUG_TRACE("%p: PPPoE - session: %x, mac: %pM\n", npci,
529					nircm->pppoe_rule.flow_pppoe_session_id,
530					nircm->pppoe_rule.flow_pppoe_remote_mac);
531#else
532			rule_invalid = true;
533#endif
534			break;
535		case ECM_DB_IFACE_TYPE_VLAN:
536#ifdef ECM_INTERFACE_VLAN_ENABLE
537			DEBUG_TRACE("%p: VLAN\n", npci);
538			if (interface_type_counts[ii_type] > 1) {
539				/*
540				 * Can only support two vlans
541				 */
542				rule_invalid = true;
543				DEBUG_TRACE("%p: VLAN - additional unsupported\n", npci);
544				break;
545			}
546			ecm_db_iface_vlan_info_get(ii, &vlan_info);
547			vlan_value = ((vlan_info.vlan_tpid << 16) | vlan_info.vlan_tag);
548
549			/*
550			 * Look up the vlan device and incorporate the vlan priority into the vlan_value
551			 */
552			vlan_in_dev = dev_get_by_index(&init_net, ecm_db_iface_interface_identifier_get(ii));
553			if (vlan_in_dev) {
554				vlan_value |= vlan_dev_get_egress_prio(vlan_in_dev, pr->return_qos_tag);
555				dev_put(vlan_in_dev);
556				vlan_in_dev = NULL;
557			}
558
559			/*
560			 * Primary or secondary (QinQ) VLAN?
561			 */
562			if (interface_type_counts[ii_type] == 0) {
563				nircm->vlan_primary_rule.ingress_vlan_tag = vlan_value;
564			} else {
565				nircm->vlan_secondary_rule.ingress_vlan_tag = vlan_value;
566			}
567			nircm->valid_flags |= SFE_RULE_CREATE_VLAN_VALID;
568
569			/*
570			 * If we have not yet got an ethernet mac then take this one (very unlikely as mac should have been propagated to the slave (outer) device
571			 */
572			if (interface_type_counts[ECM_DB_IFACE_TYPE_ETHERNET] == 0) {
573				memcpy(from_sfe_iface_address, vlan_info.address, ETH_ALEN);
574				interface_type_counts[ECM_DB_IFACE_TYPE_ETHERNET]++;
575				DEBUG_TRACE("%p: VLAN use mac: %pM\n", npci, from_sfe_iface_address);
576			}
577			DEBUG_TRACE("%p: vlan tag: %x\n", npci, vlan_value);
578#else
579			rule_invalid = true;
580			DEBUG_TRACE("%p: VLAN - unsupported\n", npci);
581#endif
582			break;
583		case ECM_DB_IFACE_TYPE_IPSEC_TUNNEL:
584#ifdef ECM_INTERFACE_IPSEC_ENABLE
585			DEBUG_TRACE("%p: IPSEC\n", npci);
586			if (interface_type_counts[ii_type] != 0) {
587				/*
588				 * Can only support one ipsec
589				 */
590				rule_invalid = true;
591				DEBUG_TRACE("%p: IPSEC - additional unsupported\n", npci);
592				break;
593			}
594			nircm->conn_rule.flow_interface_num = SFE_SPECIAL_INTERFACE_IPSEC;
595#else
596			rule_invalid = true;
597			DEBUG_TRACE("%p: IPSEC - unsupported\n", npci);
598#endif
599			break;
600		default:
601			DEBUG_TRACE("%p: Ignoring: %d (%s)\n", npci, ii_type, ii_name);
602		}
603
604		/*
605		 * Seen an interface of this type
606		 */
607		interface_type_counts[ii_type]++;
608	}
609	if (rule_invalid) {
610		DEBUG_WARN("%p: from/src Rule invalid\n", npci);
611		ecm_db_connection_interfaces_deref(from_ifaces, from_ifaces_first);
612		ecm_db_connection_interfaces_deref(to_ifaces, to_ifaces_first);
613		goto ported_accel_bad_rule;
614	}
615
616	/*
617	 * Now examine the TO / DEST heirarchy list to construct the destination part of the rule
618	 */
619	DEBUG_TRACE("%p: Examine to/dest heirarchy list\n", npci);
620	memset(interface_type_counts, 0, sizeof(interface_type_counts));
621	rule_invalid = false;
622	for (list_index = to_ifaces_first; !rule_invalid && (list_index < ECM_DB_IFACE_HEIRARCHY_MAX); list_index++) {
623		struct ecm_db_iface_instance *ii;
624		ecm_db_iface_type_t ii_type;
625		char *ii_name;
626
627		ii = to_ifaces[list_index];
628		ii_type = ecm_db_connection_iface_type_get(ii);
629		ii_name = ecm_db_interface_type_to_string(ii_type);
630		DEBUG_TRACE("%p: list_index: %d, ii: %p, type: %d (%s)\n", npci, list_index, ii, ii_type, ii_name);
631
632		/*
633		 * Extract information from this interface type if it is applicable to the rule.
634		 * Conflicting information may cause accel to be unsupported.
635		 */
636		switch (ii_type) {
637#ifdef ECM_INTERFACE_PPP_ENABLE
638			struct ecm_db_interface_info_pppoe pppoe_info;
639#endif
640#ifdef ECM_INTERFACE_VLAN_ENABLE
641			struct ecm_db_interface_info_vlan vlan_info;
642			uint32_t vlan_value = 0;
643			struct net_device *vlan_out_dev = NULL;
644#endif
645		case ECM_DB_IFACE_TYPE_BRIDGE:
646			DEBUG_TRACE("%p: Bridge\n", npci);
647			if (interface_type_counts[ii_type] != 0) {
648				/*
649				 * Cannot cascade bridges
650				 */
651				rule_invalid = true;
652				DEBUG_TRACE("%p: Bridge - ignore additional\n", npci);
653				break;
654			}
655			ecm_db_iface_bridge_address_get(ii, to_sfe_iface_address);
656			DEBUG_TRACE("%p: Bridge - mac: %pM\n", npci, to_sfe_iface_address);
657			break;
658		case ECM_DB_IFACE_TYPE_ETHERNET:
659			DEBUG_TRACE("%p: Ethernet\n", npci);
660			if (interface_type_counts[ii_type] != 0) {
661				/*
662				 * Ignore additional mac addresses, these are usually as a result of address propagation
663				 * from bridges down to ports etc.
664				 */
665				DEBUG_TRACE("%p: Ethernet - ignore additional\n", npci);
666				break;
667			}
668
669			/*
670			 * Can only handle one MAC, the first outermost mac.
671			 */
672			ecm_db_iface_ethernet_address_get(ii, to_sfe_iface_address);
673			DEBUG_TRACE("%p: Ethernet - mac: %pM\n", npci, to_sfe_iface_address);
674			break;
675		case ECM_DB_IFACE_TYPE_PPPOE:
676#ifdef ECM_INTERFACE_PPP_ENABLE
677			/*
678			 * More than one PPPoE in the list is not valid!
679			 */
680			if (interface_type_counts[ii_type] != 0) {
681				DEBUG_TRACE("%p: PPPoE - additional unsupported\n", npci);
682				rule_invalid = true;
683				break;
684			}
685
686			/*
687			 * Copy pppoe session info to the creation structure.
688			 */
689			ecm_db_iface_pppoe_session_info_get(ii, &pppoe_info);
690			nircm->pppoe_rule.return_pppoe_session_id = pppoe_info.pppoe_session_id;
691			memcpy(nircm->pppoe_rule.return_pppoe_remote_mac, pppoe_info.remote_mac, ETH_ALEN);
692			nircm->valid_flags |= SFE_RULE_CREATE_PPPOE_VALID;
693
694			DEBUG_TRACE("%p: PPPoE - session: %x, mac: %pM\n", npci,
695				    nircm->pppoe_rule.return_pppoe_session_id,
696				    nircm->pppoe_rule.return_pppoe_remote_mac);
697#else
698			rule_invalid = true;
699#endif
700			break;
701		case ECM_DB_IFACE_TYPE_VLAN:
702#ifdef ECM_INTERFACE_VLAN_ENABLE
703			DEBUG_TRACE("%p: VLAN\n", npci);
704			if (interface_type_counts[ii_type] > 1) {
705				/*
706				 * Can only support two vlans
707				 */
708				rule_invalid = true;
709				DEBUG_TRACE("%p: VLAN - additional unsupported\n", npci);
710				break;
711			}
712			ecm_db_iface_vlan_info_get(ii, &vlan_info);
713			vlan_value = ((vlan_info.vlan_tpid << 16) | vlan_info.vlan_tag);
714
715			/*
716			 * Look up the vlan device and incorporate the vlan priority into the vlan_value
717			 */
718			vlan_out_dev = dev_get_by_index(&init_net, ecm_db_iface_interface_identifier_get(ii));
719			if (vlan_out_dev) {
720				vlan_value |= vlan_dev_get_egress_prio(vlan_out_dev, pr->flow_qos_tag);
721				dev_put(vlan_out_dev);
722				vlan_out_dev = NULL;
723			}
724
725			/*
726			 * Primary or secondary (QinQ) VLAN?
727			 */
728			if (interface_type_counts[ii_type] == 0) {
729				nircm->vlan_primary_rule.egress_vlan_tag = vlan_value;
730			} else {
731				nircm->vlan_secondary_rule.egress_vlan_tag = vlan_value;
732			}
733			nircm->valid_flags |= SFE_RULE_CREATE_VLAN_VALID;
734
735			/*
736			 * If we have not yet got an ethernet mac then take this one (very unlikely as mac should have been propagated to the slave (outer) device
737			 */
738			if (interface_type_counts[ECM_DB_IFACE_TYPE_ETHERNET] == 0) {
739				memcpy(to_sfe_iface_address, vlan_info.address, ETH_ALEN);
740				interface_type_counts[ECM_DB_IFACE_TYPE_ETHERNET]++;
741				DEBUG_TRACE("%p: VLAN use mac: %pM\n", npci, to_sfe_iface_address);
742			}
743			DEBUG_TRACE("%p: vlan tag: %x\n", npci, vlan_value);
744#else
745			rule_invalid = true;
746			DEBUG_TRACE("%p: VLAN - unsupported\n", npci);
747#endif
748			break;
749		case ECM_DB_IFACE_TYPE_IPSEC_TUNNEL:
750#ifdef ECM_INTERFACE_IPSEC_ENABLE
751			DEBUG_TRACE("%p: IPSEC\n", npci);
752			if (interface_type_counts[ii_type] != 0) {
753				/*
754				 * Can only support one ipsec
755				 */
756				rule_invalid = true;
757				DEBUG_TRACE("%p: IPSEC - additional unsupported\n", npci);
758				break;
759			}
760			nircm->conn_rule.return_interface_num = SFE_SPECIAL_INTERFACE_IPSEC;
761#else
762			rule_invalid = true;
763			DEBUG_TRACE("%p: IPSEC - unsupported\n", npci);
764#endif
765			break;
766		default:
767			DEBUG_TRACE("%p: Ignoring: %d (%s)\n", npci, ii_type, ii_name);
768		}
769
770		/*
771		 * Seen an interface of this type
772		 */
773		interface_type_counts[ii_type]++;
774	}
775	if (rule_invalid) {
776		DEBUG_WARN("%p: from/src Rule invalid\n", npci);
777		ecm_db_connection_interfaces_deref(from_ifaces, from_ifaces_first);
778		ecm_db_connection_interfaces_deref(to_ifaces, to_ifaces_first);
779		goto ported_accel_bad_rule;
780	}
781
782	/*
783	 * Routed or bridged?
784	 */
785	if (ecm_db_connection_is_routed_get(feci->ci)) {
786		nircm->rule_flags |= SFE_RULE_CREATE_FLAG_ROUTED;
787	} else {
788		nircm->rule_flags |= SFE_RULE_CREATE_FLAG_BRIDGE_FLOW;
789		if (is_l2_encap) {
790			nircm->rule_flags |= SFE_RULE_CREATE_FLAG_L2_ENCAP;
791		}
792	}
793
794	/*
795	 * Set up the flow and return qos tags
796	 */
797	nircm->qos_rule.flow_qos_tag = (uint32_t)pr->flow_qos_tag;
798	nircm->qos_rule.return_qos_tag = (uint32_t)pr->return_qos_tag;
799	nircm->valid_flags |= SFE_RULE_CREATE_QOS_VALID;
800
801#ifdef ECM_CLASSIFIER_DSCP_ENABLE
802	/*
803	 * DSCP information?
804	 */
805	if (pr->process_actions & ECM_CLASSIFIER_PROCESS_ACTION_DSCP) {
806		nircm->dscp_rule.flow_dscp = pr->flow_dscp;
807		nircm->dscp_rule.return_dscp = pr->return_dscp;
808		nircm->rule_flags |= SFE_RULE_CREATE_FLAG_DSCP_MARKING;
809		nircm->valid_flags |= SFE_RULE_CREATE_DSCP_MARKING_VALID;
810	}
811#endif
812	protocol = ecm_db_connection_protocol_get(feci->ci);
813
814	/*
815	 * Set protocol
816	 */
817	nircm->tuple.protocol = (int32_t)protocol;
818
819	/*
820	 * The flow_ip is where the connection established from
821	 */
822	ecm_db_connection_from_address_get(feci->ci, src_ip);
823	ECM_IP_ADDR_TO_SFE_IPV6_ADDR(nircm->tuple.flow_ip, src_ip);
824
825	/*
826	 * The dest_ip is where the connection is established to
827	 */
828	ecm_db_connection_to_address_get(feci->ci, dest_ip);
829	ECM_IP_ADDR_TO_SFE_IPV6_ADDR(nircm->tuple.return_ip, dest_ip);
830
831	/*
832	 * Same approach as above for port information
833	 */
834	nircm->tuple.flow_ident = htons(ecm_db_connection_from_port_get(feci->ci));
835	nircm->tuple.return_ident = htons(ecm_db_connection_to_port_get(feci->ci));
836
837	/*
838	 * Get mac addresses.
839	 * The src_mac is the mac address of the node that established the connection.
840	 * This will work whether the from_node is LAN (egress) or WAN (ingress).
841	 */
842	ecm_db_connection_from_node_address_get(feci->ci, (uint8_t *)nircm->conn_rule.flow_mac);
843
844	/*
845	 * The dest_mac is more complex.  For egress it is the node address of the 'to' side of the connection.
846	 * For ingress it is the node adress of the NAT'ed 'to' IP.
847	 * Essentially it is the MAC of node associated with create.dest_ip and this is "to nat" side.
848	 */
849	ecm_db_connection_to_node_address_get(feci->ci, (uint8_t *)nircm->conn_rule.return_mac);
850
851	/*
852	 * Get MTU information
853	 */
854	nircm->conn_rule.flow_mtu = (uint32_t)ecm_db_connection_from_iface_mtu_get(feci->ci);
855	nircm->conn_rule.return_mtu = (uint32_t)ecm_db_connection_to_iface_mtu_get(feci->ci);
856
857	if (protocol == IPPROTO_TCP) {
858		/*
859		 * Need window scaling information from conntrack if available
860		 * Start by looking up the conntrack connection
861		 */
862		if (!ct) {
863			/*
864			 * No conntrack so no need to check window sequence space
865			 */
866			DEBUG_TRACE("%p: TCP Accel no ct from conn %p to get window data\n", npci, feci->ci);
867			nircm->rule_flags |= SFE_RULE_CREATE_FLAG_NO_SEQ_CHECK;
868		} else {
869			spin_lock_bh(&ct->lock);
870			DEBUG_TRACE("%p: TCP Accel Get window data from ct %p for conn %p\n", npci, ct, feci->ci);
871
872			nircm->tcp_rule.flow_window_scale = ct->proto.tcp.seen[0].td_scale;
873			nircm->tcp_rule.flow_max_window = ct->proto.tcp.seen[0].td_maxwin;
874			nircm->tcp_rule.flow_end = ct->proto.tcp.seen[0].td_end;
875			nircm->tcp_rule.flow_max_end = ct->proto.tcp.seen[0].td_maxend;
876			nircm->tcp_rule.return_window_scale = ct->proto.tcp.seen[1].td_scale;
877			nircm->tcp_rule.return_max_window = ct->proto.tcp.seen[1].td_maxwin;
878			nircm->tcp_rule.return_end = ct->proto.tcp.seen[1].td_end;
879			nircm->tcp_rule.return_max_end = ct->proto.tcp.seen[1].td_maxend;
880#ifdef ECM_OPENWRT_SUPPORT
881			if (nf_ct_tcp_be_liberal || nf_ct_tcp_no_window_check
882#else
883			if (nf_ct_tcp_be_liberal
884#endif
885					|| (ct->proto.tcp.seen[0].flags & IP_CT_TCP_FLAG_BE_LIBERAL)
886					|| (ct->proto.tcp.seen[1].flags & IP_CT_TCP_FLAG_BE_LIBERAL)) {
887				nircm->rule_flags |= SFE_RULE_CREATE_FLAG_NO_SEQ_CHECK;
888			}
889			spin_unlock_bh(&ct->lock);
890		}
891
892		nircm->valid_flags |= SFE_RULE_CREATE_TCP_VALID;
893	}
894
895	/*
896	 * Sync our creation command from the assigned classifiers to get specific additional creation rules.
897	 * NOTE: These are called in ascending order of priority and so the last classifier (highest) shall
898	 * override any preceding classifiers.
899	 * This also gives the classifiers a chance to see that acceleration is being attempted.
900	 */
901	assignment_count = ecm_db_connection_classifier_assignments_get_and_ref(feci->ci, assignments);
902	for (aci_index = 0; aci_index < assignment_count; ++aci_index) {
903		struct ecm_classifier_instance *aci;
904		struct ecm_classifier_rule_create ecrc;
905		/*
906		 * NOTE: The current classifiers do not sync anything to the underlying accel engines.
907		 * In the future, if any of the classifiers wants to pass any parameter, these parameters
908		 * should be received via this object and copied to the accel engine's create object (nircm).
909		*/
910		aci = assignments[aci_index];
911		DEBUG_TRACE("%p: sync from: %p, type: %d\n", npci, aci, aci->type_get(aci));
912		aci->sync_from_v6(aci, &ecrc);
913	}
914	ecm_db_connection_assignments_release(assignment_count, assignments);
915
916	/*
917	 * Release the interface lists
918	 */
919	ecm_db_connection_interfaces_deref(from_ifaces, from_ifaces_first);
920	ecm_db_connection_interfaces_deref(to_ifaces, to_ifaces_first);
921
922	DEBUG_INFO("%p: Ported Accelerate connection %p\n"
923			"Protocol: %d\n"
924			"from_mtu: %u\n"
925			"to_mtu: %u\n"
926			"from_ip: " ECM_IP_ADDR_OCTAL_FMT ":%d\n"
927			"to_ip: " ECM_IP_ADDR_OCTAL_FMT ":%d\n"
928			"from_mac: %pM\n"
929			"to_mac: %pM\n"
930			"src_iface_num: %u\n"
931			"dest_iface_num: %u\n"
932			"ingress_inner_vlan_tag: %u\n"
933			"egress_inner_vlan_tag: %u\n"
934			"ingress_outer_vlan_tag: %u\n"
935			"egress_outer_vlan_tag: %u\n"
936			"rule_flags: %x\n"
937			"valid_flags: %x\n"
938			"return_pppoe_session_id: %u\n"
939			"return_pppoe_remote_mac: %pM\n"
940			"flow_pppoe_session_id: %u\n"
941			"flow_pppoe_remote_mac: %pM\n"
942			"flow_qos_tag: %x (%u)\n"
943			"return_qos_tag: %x (%u)\n"
944			"flow_dscp: %x\n"
945			"return_dscp: %x\n",
946			npci,
947			feci->ci,
948			nircm->tuple.protocol,
949			nircm->conn_rule.flow_mtu,
950			nircm->conn_rule.return_mtu,
951			ECM_IP_ADDR_TO_OCTAL(src_ip), nircm->tuple.flow_ident,
952			ECM_IP_ADDR_TO_OCTAL(dest_ip), nircm->tuple.return_ident,
953			nircm->conn_rule.flow_mac,
954			nircm->conn_rule.return_mac,
955			nircm->conn_rule.flow_interface_num,
956			nircm->conn_rule.return_interface_num,
957			nircm->vlan_primary_rule.ingress_vlan_tag,
958			nircm->vlan_primary_rule.egress_vlan_tag,
959			nircm->vlan_secondary_rule.ingress_vlan_tag,
960			nircm->vlan_secondary_rule.egress_vlan_tag,
961			nircm->rule_flags,
962			nircm->valid_flags,
963			nircm->pppoe_rule.return_pppoe_session_id,
964			nircm->pppoe_rule.return_pppoe_remote_mac,
965			nircm->pppoe_rule.flow_pppoe_session_id,
966			nircm->pppoe_rule.flow_pppoe_remote_mac,
967			nircm->qos_rule.flow_qos_tag, nircm->qos_rule.flow_qos_tag,
968			nircm->qos_rule.return_qos_tag, nircm->qos_rule.return_qos_tag,
969			nircm->dscp_rule.flow_dscp,
970			nircm->dscp_rule.return_dscp);
971
972	if (protocol == IPPROTO_TCP) {
973		DEBUG_INFO("flow_window_scale: %u\n"
974			"flow_max_window: %u\n"
975			"flow_end: %u\n"
976			"flow_max_end: %u\n"
977			"return_window_scale: %u\n"
978			"return_max_window: %u\n"
979			"return_end: %u\n"
980			"return_max_end: %u\n",
981			nircm->tcp_rule.flow_window_scale,
982			nircm->tcp_rule.flow_max_window,
983			nircm->tcp_rule.flow_end,
984			nircm->tcp_rule.flow_max_end,
985			nircm->tcp_rule.return_window_scale,
986			nircm->tcp_rule.return_max_window,
987			nircm->tcp_rule.return_end,
988			nircm->tcp_rule.return_max_end);
989	}
990
991	/*
992	 * Now that the rule has been constructed we re-compare the generation occurrance counter.
993	 * If there has been a change then we abort because the rule may have been created using
994	 * unstable data - especially if another thread has begun regeneration of the connection state.
995	 * NOTE: This does not prevent a regen from being flagged immediately after this line of code either,
996	 * or while the acceleration rule is in flight to the nss.
997	 * This is only to check for consistency of rule state - not that the state is stale.
998	 * Remember that the connection is marked as "accel pending state" so if a regen is flagged immediately
999	 * after this check passes, the connection will be decelerated and refreshed very quickly.
1000	 */
1001	if (regen_occurrances != ecm_db_connection_regeneration_occurrances_get(feci->ci)) {
1002		DEBUG_INFO("%p: connection:%p regen occurred - aborting accel rule.\n", feci, feci->ci);
1003		ecm_sfe_ipv6_accel_pending_clear(feci, ECM_FRONT_END_ACCELERATION_MODE_DECEL);
1004		return;
1005	}
1006
1007	/*
1008	 * Ref the connection before issuing an SFE rule
1009	 * This ensures that when the SFE responds to the command - which may even be immediately -
1010	 * the callback function can trust the correct ref was taken for its purpose.
1011	 * NOTE: remember that this will also implicitly hold the feci.
1012	 */
1013	ecm_db_connection_ref(feci->ci);
1014
1015	/*
1016	 * We are about to issue the command, record the time of transmission
1017	 */
1018	spin_lock_bh(&feci->lock);
1019	feci->stats.cmd_time_begun = jiffies;
1020	spin_unlock_bh(&feci->lock);
1021
1022	/*
1023	 * Call the rule create function
1024	 */
1025	sfe_tx_status = sfe_drv_ipv6_tx(ecm_sfe_ipv6_drv_mgr, &nim);
1026	if (sfe_tx_status == SFE_TX_SUCCESS) {
1027		/*
1028		 * Reset the driver_fail count - transmission was okay here.
1029		 */
1030		spin_lock_bh(&feci->lock);
1031		feci->stats.driver_fail = 0;
1032		spin_unlock_bh(&feci->lock);
1033		return;
1034	}
1035
1036	/*
1037	 * Release that ref!
1038	 */
1039	ecm_db_connection_deref(feci->ci);
1040
1041	/*
1042	 * TX failed
1043	 */
1044	spin_lock_bh(&feci->lock);
1045	DEBUG_ASSERT(feci->accel_mode == ECM_FRONT_END_ACCELERATION_MODE_ACCEL_PENDING, "%p: Accel mode unexpected: %d\n", npci, feci->accel_mode);
1046	feci->stats.driver_fail_total++;
1047	feci->stats.driver_fail++;
1048	if (feci->stats.driver_fail >= feci->stats.driver_fail_limit) {
1049		DEBUG_WARN("%p: Accel failed - driver fail limit\n", npci);
1050		result_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DRIVER;
1051	} else {
1052		result_mode = ECM_FRONT_END_ACCELERATION_MODE_DECEL;
1053	}
1054
1055	spin_lock_bh(&ecm_sfe_ipv6_lock);
1056	_ecm_sfe_ipv6_accel_pending_clear(feci, result_mode);
1057	spin_unlock_bh(&ecm_sfe_ipv6_lock);
1058
1059	spin_unlock_bh(&feci->lock);
1060	return;
1061
1062ported_accel_bad_rule:
1063	;
1064
1065	/*
1066	 * Jump to here when rule data is bad and an offload command cannot be constructed
1067	 */
1068	DEBUG_WARN("%p: Accel failed - bad rule\n", npci);
1069	ecm_sfe_ipv6_accel_pending_clear(feci, ECM_FRONT_END_ACCELERATION_MODE_FAIL_RULE);
1070}
1071
1072/*
1073 * ecm_sfe_ported_ipv6_connection_destroy_callback()
1074 *	Callback for handling destroy ack/nack calls.
1075 */
1076static void ecm_sfe_ported_ipv6_connection_destroy_callback(void *app_data, struct sfe_ipv6_msg *nim)
1077{
1078	struct sfe_ipv6_rule_destroy_msg *nirdm = &nim->msg.rule_destroy;
1079	uint32_t serial = (uint32_t)app_data;
1080	struct ecm_db_connection_instance *ci;
1081	struct ecm_front_end_connection_instance *feci;
1082	struct ecm_sfe_ported_ipv6_connection_instance *npci;
1083	ip_addr_t flow_ip;
1084	ip_addr_t return_ip;
1085
1086	/*
1087	 * Is this a response to a destroy message?
1088	 */
1089	if (nim->cm.type != SFE_TX_DESTROY_RULE_MSG) {
1090		DEBUG_ERROR("%p: ported destroy callback with improper type: %d\n", nim, nim->cm.type);
1091		return;
1092	}
1093
1094	/*
1095	 * Look up ecm connection so that we can update the status.
1096	 */
1097	ci = ecm_db_connection_serial_find_and_ref(serial);
1098	if (!ci) {
1099		DEBUG_TRACE("%p: destroy callback, connection not found, serial: %u\n", nim, serial);
1100		return;
1101	}
1102
1103	/*
1104	 * Release ref held for this ack/nack response.
1105	 * NOTE: It's okay to do this here, ci won't go away, because the ci is held as
1106	 * a result of the ecm_db_connection_serial_find_and_ref()
1107	 */
1108	ecm_db_connection_deref(ci);
1109
1110	/*
1111	 * Get the front end instance
1112	 */
1113	feci = ecm_db_connection_front_end_get_and_ref(ci);
1114	npci = (struct ecm_sfe_ported_ipv6_connection_instance *)feci;
1115	DEBUG_CHECK_MAGIC(npci, ECM_SFE_PORTED_IPV6_CONNECTION_INSTANCE_MAGIC, "%p: magic failed", npci);
1116
1117	ECM_SFE_IPV6_ADDR_TO_IP_ADDR(flow_ip, nirdm->tuple.flow_ip);
1118	ECM_SFE_IPV6_ADDR_TO_IP_ADDR(return_ip, nirdm->tuple.return_ip);
1119
1120	/*
1121	 * Record command duration
1122	 */
1123	ecm_sfe_ipv6_decel_done_time_update(feci);
1124
1125	/*
1126	 * Dump some useful trace information.
1127	 */
1128	DEBUG_TRACE("%p: decelerate response for connection: %p\n", npci, feci->ci);
1129	DEBUG_TRACE("%p: flow_ip: " ECM_IP_ADDR_OCTAL_FMT ":%d\n", npci, ECM_IP_ADDR_TO_OCTAL(flow_ip), nirdm->tuple.flow_ident);
1130	DEBUG_TRACE("%p: return_ip: " ECM_IP_ADDR_OCTAL_FMT ":%d\n", npci, ECM_IP_ADDR_TO_OCTAL(return_ip), nirdm->tuple.return_ident);
1131	DEBUG_TRACE("%p: protocol: %d\n", npci, nirdm->tuple.protocol);
1132
1133	/*
1134	 * Drop decel pending counter
1135	 */
1136	spin_lock_bh(&ecm_sfe_ipv6_lock);
1137	ecm_sfe_ipv6_pending_decel_count--;
1138	DEBUG_ASSERT(ecm_sfe_ipv6_pending_decel_count >= 0, "Bad decel pending counter\n");
1139	spin_unlock_bh(&ecm_sfe_ipv6_lock);
1140
1141	spin_lock_bh(&feci->lock);
1142
1143	/*
1144	 * If decel is not still pending then it's possible that the SFE ended acceleration by some other reason e.g. flush
1145	 * In which case we cannot rely on the response we get here.
1146	 */
1147	if (feci->accel_mode != ECM_FRONT_END_ACCELERATION_MODE_DECEL_PENDING) {
1148		spin_unlock_bh(&feci->lock);
1149
1150		/*
1151		 * Release the connections.
1152		 */
1153		feci->deref(feci);
1154		ecm_db_connection_deref(ci);
1155		return;
1156	}
1157
1158	DEBUG_TRACE("%p: response: %d\n", npci, nim->cm.response);
1159	if (nim->cm.response != SFE_CMN_RESPONSE_ACK) {
1160		feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DECEL;
1161	} else {
1162		feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_DECEL;
1163	}
1164
1165	/*
1166	 * If connection became defunct then set mode so that no further accel/decel attempts occur.
1167	 */
1168	if (feci->is_defunct) {
1169		feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DEFUNCT;
1170	}
1171	spin_unlock_bh(&feci->lock);
1172
1173	/*
1174	 * Ported acceleration ends
1175	 */
1176	spin_lock_bh(&ecm_sfe_ipv6_lock);
1177	ecm_sfe_ported_ipv6_accelerated_count[npci->ported_accelerated_count_index]--;	/* Protocol specific counter */
1178	DEBUG_ASSERT(ecm_sfe_ported_ipv6_accelerated_count[npci->ported_accelerated_count_index] >= 0, "Bad udp accel counter\n");
1179	ecm_sfe_ipv6_accelerated_count--;		/* General running counter */
1180	DEBUG_ASSERT(ecm_sfe_ipv6_accelerated_count >= 0, "Bad accel counter\n");
1181	spin_unlock_bh(&ecm_sfe_ipv6_lock);
1182
1183	/*
1184	 * Release the connections.
1185	 */
1186	feci->deref(feci);
1187	ecm_db_connection_deref(ci);
1188}
1189
1190/*
1191 * ecm_sfe_ported_ipv6_connection_decelerate()
1192 *	Decelerate a connection
1193 */
1194static void ecm_sfe_ported_ipv6_connection_decelerate(struct ecm_front_end_connection_instance *feci)
1195{
1196	struct ecm_sfe_ported_ipv6_connection_instance *npci = (struct ecm_sfe_ported_ipv6_connection_instance *)feci;
1197	struct sfe_ipv6_msg nim;
1198	struct sfe_ipv6_rule_destroy_msg *nirdm;
1199	ip_addr_t src_ip;
1200	ip_addr_t dest_ip;
1201	sfe_tx_status_t sfe_tx_status;
1202
1203	DEBUG_CHECK_MAGIC(npci, ECM_SFE_PORTED_IPV6_CONNECTION_INSTANCE_MAGIC, "%p: magic failed", npci);
1204
1205	/*
1206	 * If decelerate is in error or already pending then ignore
1207	 */
1208	spin_lock_bh(&feci->lock);
1209	if (feci->stats.decelerate_pending) {
1210		spin_unlock_bh(&feci->lock);
1211		return;
1212	}
1213
1214	/*
1215	 * If acceleration is pending then we cannot decelerate right now or we will race with it
1216	 * Set a decelerate pending flag that will be actioned when the acceleration command is complete.
1217	 */
1218	if (feci->accel_mode == ECM_FRONT_END_ACCELERATION_MODE_ACCEL_PENDING) {
1219		feci->stats.decelerate_pending = true;
1220		spin_unlock_bh(&feci->lock);
1221		return;
1222	}
1223
1224	/*
1225	 * Can only decelerate if accelerated
1226	 * NOTE: This will also deny accel when the connection is in fail condition too.
1227	 */
1228	if (feci->accel_mode != ECM_FRONT_END_ACCELERATION_MODE_ACCEL) {
1229		spin_unlock_bh(&feci->lock);
1230		return;
1231	}
1232
1233	/*
1234	 * Initiate deceleration
1235	 */
1236	feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_DECEL_PENDING;
1237	spin_unlock_bh(&feci->lock);
1238
1239	/*
1240	 * Increment the decel pending counter
1241	 */
1242	spin_lock_bh(&ecm_sfe_ipv6_lock);
1243	ecm_sfe_ipv6_pending_decel_count++;
1244	spin_unlock_bh(&ecm_sfe_ipv6_lock);
1245
1246	/*
1247	 * Prepare deceleration message
1248	 */
1249	sfe_ipv6_msg_init(&nim, SFE_SPECIAL_INTERFACE_IPV6, SFE_TX_DESTROY_RULE_MSG,
1250			sizeof(struct sfe_ipv6_rule_destroy_msg),
1251			ecm_sfe_ported_ipv6_connection_destroy_callback,
1252			(void *)ecm_db_connection_serial_get(feci->ci));
1253
1254	nirdm = &nim.msg.rule_destroy;
1255	nirdm->tuple.protocol = (int32_t)ecm_db_connection_protocol_get(feci->ci);;
1256
1257	/*
1258	 * Get addressing information
1259	 */
1260	ecm_db_connection_from_address_get(feci->ci, src_ip);
1261	ECM_IP_ADDR_TO_SFE_IPV6_ADDR(nirdm->tuple.flow_ip, src_ip);
1262	ecm_db_connection_to_address_get(feci->ci, dest_ip);
1263	ECM_IP_ADDR_TO_SFE_IPV6_ADDR(nirdm->tuple.return_ip, dest_ip);
1264	nirdm->tuple.flow_ident = htons(ecm_db_connection_from_port_get(feci->ci));
1265	nirdm->tuple.return_ident = htons(ecm_db_connection_to_port_get(feci->ci));
1266
1267	DEBUG_INFO("%p: Ported Connection %p decelerate\n"
1268			"protocol: %d\n"
1269			"src_ip: " ECM_IP_ADDR_OCTAL_FMT ":%d\n"
1270			"dest_ip: " ECM_IP_ADDR_OCTAL_FMT ":%d\n",
1271			npci, feci->ci, nirdm->tuple.protocol,
1272			ECM_IP_ADDR_TO_OCTAL(src_ip), nirdm->tuple.flow_ident,
1273			ECM_IP_ADDR_TO_OCTAL(dest_ip), nirdm->tuple.return_ident);
1274
1275	/*
1276	 * Take a ref to the feci->ci so that it will persist until we get a response from the SFE.
1277	 * NOTE: This will implicitly hold the feci too.
1278	 */
1279	ecm_db_connection_ref(feci->ci);
1280
1281	/*
1282	 * We are about to issue the command, record the time of transmission
1283	 */
1284	spin_lock_bh(&feci->lock);
1285	feci->stats.cmd_time_begun = jiffies;
1286	spin_unlock_bh(&feci->lock);
1287
1288	/*
1289	 * Destroy the SFE connection cache entry.
1290	 */
1291	sfe_tx_status = sfe_drv_ipv6_tx(ecm_sfe_ipv6_drv_mgr, &nim);
1292	if (sfe_tx_status == SFE_TX_SUCCESS) {
1293		/*
1294		 * Reset the driver_fail count - transmission was okay here.
1295		 */
1296		spin_lock_bh(&feci->lock);
1297		feci->stats.driver_fail = 0;
1298		spin_unlock_bh(&feci->lock);
1299		return;
1300	}
1301
1302	/*
1303	 * Release the ref take, SFE driver did not accept our command.
1304	 */
1305	ecm_db_connection_deref(feci->ci);
1306
1307	/*
1308	 * TX failed
1309	 */
1310	spin_lock_bh(&feci->lock);
1311	feci->stats.driver_fail_total++;
1312	feci->stats.driver_fail++;
1313	if (feci->stats.driver_fail >= feci->stats.driver_fail_limit) {
1314		DEBUG_WARN("%p: Decel failed - driver fail limit\n", npci);
1315		feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DRIVER;
1316	}
1317	spin_unlock_bh(&feci->lock);
1318
1319	/*
1320	 * Could not send the request, decrement the decel pending counter
1321	 */
1322	spin_lock_bh(&ecm_sfe_ipv6_lock);
1323	ecm_sfe_ipv6_pending_decel_count--;
1324	DEBUG_ASSERT(ecm_sfe_ipv6_pending_decel_count >= 0, "Bad decel pending counter\n");
1325	spin_unlock_bh(&ecm_sfe_ipv6_lock);
1326}
1327
1328/*
1329 * ecm_sfe_ported_ipv6_connection_defunct_callback()
1330 *	Callback to be called when a ported connection has become defunct.
1331 */
1332static void ecm_sfe_ported_ipv6_connection_defunct_callback(void *arg)
1333{
1334	struct ecm_front_end_connection_instance *feci = (struct ecm_front_end_connection_instance *)arg;
1335	struct ecm_sfe_ported_ipv6_connection_instance *npci = (struct ecm_sfe_ported_ipv6_connection_instance *)feci;
1336
1337	DEBUG_CHECK_MAGIC(npci, ECM_SFE_PORTED_IPV6_CONNECTION_INSTANCE_MAGIC, "%p: magic failed", npci);
1338
1339	spin_lock_bh(&feci->lock);
1340
1341	/*
1342	 * If connection has already become defunct, do nothing.
1343	 */
1344	if (feci->is_defunct) {
1345		spin_unlock_bh(&feci->lock);
1346		return;
1347	}
1348	feci->is_defunct = true;
1349
1350	/*
1351	 * If the connection is already in one of the fail modes, do nothing, keep the current accel_mode.
1352	 */
1353	if (ECM_FRONT_END_ACCELERATION_FAILED(feci->accel_mode)) {
1354		spin_unlock_bh(&feci->lock);
1355		return;
1356	}
1357
1358	/*
1359	 * If the connection is decel then ensure it will not attempt accel while defunct.
1360	 */
1361	if (feci->accel_mode == ECM_FRONT_END_ACCELERATION_MODE_DECEL) {
1362		feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DEFUNCT;
1363		spin_unlock_bh(&feci->lock);
1364		return;
1365	}
1366
1367	/*
1368	 * If the connection is decel pending then decel operation is in progress anyway.
1369	 */
1370	if (feci->accel_mode == ECM_FRONT_END_ACCELERATION_MODE_DECEL_PENDING) {
1371		spin_unlock_bh(&feci->lock);
1372		return;
1373	}
1374
1375	/*
1376	 * If none of the cases matched above, this means the connection is in one of the
1377	 * accel modes (accel or accel_pending) so we force a deceleration.
1378	 * NOTE: If the mode is accel pending then the decel will be actioned when that is completed.
1379	 */
1380	spin_unlock_bh(&feci->lock);
1381	ecm_sfe_ported_ipv6_connection_decelerate(feci);
1382}
1383
1384/*
1385 * ecm_sfe_ported_ipv6_connection_accel_state_get()
1386 *	Get acceleration state
1387 */
1388static ecm_front_end_acceleration_mode_t ecm_sfe_ported_ipv6_connection_accel_state_get(struct ecm_front_end_connection_instance *feci)
1389{
1390	struct ecm_sfe_ported_ipv6_connection_instance *npci = (struct ecm_sfe_ported_ipv6_connection_instance *)feci;
1391	ecm_front_end_acceleration_mode_t state;
1392
1393	DEBUG_CHECK_MAGIC(npci, ECM_SFE_PORTED_IPV6_CONNECTION_INSTANCE_MAGIC, "%p: magic failed", npci);
1394	spin_lock_bh(&feci->lock);
1395	state = feci->accel_mode;
1396	spin_unlock_bh(&feci->lock);
1397	return state;
1398}
1399
1400/*
1401 * ecm_sfe_ported_ipv6_connection_action_seen()
1402 *	Acceleration action / activity has been seen for this connection.
1403 *
1404 * NOTE: Call the action_seen() method when the SFE has demonstrated that it has offloaded some data for a connection.
1405 */
1406static void ecm_sfe_ported_ipv6_connection_action_seen(struct ecm_front_end_connection_instance *feci)
1407{
1408	struct ecm_sfe_ported_ipv6_connection_instance *npci = (struct ecm_sfe_ported_ipv6_connection_instance *)feci;
1409
1410	DEBUG_CHECK_MAGIC(npci, ECM_SFE_PORTED_IPV6_CONNECTION_INSTANCE_MAGIC, "%p: magic failed", npci);
1411	DEBUG_INFO("%p: Action seen\n", npci);
1412	spin_lock_bh(&feci->lock);
1413	feci->stats.no_action_seen = 0;
1414	spin_unlock_bh(&feci->lock);
1415}
1416
1417/*
1418 * ecm_sfe_ported_ipv6_connection_accel_ceased()
1419 *	SFE has indicated that acceleration has stopped.
1420 *
1421 * NOTE: This is called in response to an SFE self-initiated termination of acceleration.
1422 * This must NOT be called because the ECM terminated the acceleration.
1423 */
1424static void ecm_sfe_ported_ipv6_connection_accel_ceased(struct ecm_front_end_connection_instance *feci)
1425{
1426	struct ecm_sfe_ported_ipv6_connection_instance *npci = (struct ecm_sfe_ported_ipv6_connection_instance *)feci;
1427
1428	DEBUG_CHECK_MAGIC(npci, ECM_SFE_PORTED_IPV6_CONNECTION_INSTANCE_MAGIC, "%p: magic failed", npci);
1429	DEBUG_INFO("%p: accel ceased\n", npci);
1430
1431	spin_lock_bh(&feci->lock);
1432
1433	/*
1434	 * If we are in accel-pending state then the SFE has issued a flush out-of-order
1435	 * with the ACK/NACK we are actually waiting for.
1436	 * To work around this we record a "flush has already happened" and will action it when we finally get that ACK/NACK.
1437	 * GGG TODO This should eventually be removed when the SFE honours messaging sequence.
1438	 */
1439	if (feci->accel_mode == ECM_FRONT_END_ACCELERATION_MODE_ACCEL_PENDING) {
1440		feci->stats.flush_happened = true;
1441		feci->stats.flush_happened_total++;
1442		spin_unlock_bh(&feci->lock);
1443		return;
1444	}
1445
1446	/*
1447	 * If connection is no longer accelerated by the time we get here just ignore the command
1448	 */
1449	if (feci->accel_mode != ECM_FRONT_END_ACCELERATION_MODE_ACCEL) {
1450		spin_unlock_bh(&feci->lock);
1451		return;
1452	}
1453
1454	/*
1455	 * If the no_action_seen counter was not reset then acceleration ended without any offload action
1456	 */
1457	if (feci->stats.no_action_seen) {
1458		feci->stats.no_action_seen_total++;
1459	}
1460
1461	/*
1462	 * If the no_action_seen indicates successive cessations of acceleration without any offload action occuring
1463	 * then we fail out this connection
1464	 */
1465	if (feci->stats.no_action_seen >= feci->stats.no_action_seen_limit) {
1466		feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_NO_ACTION;
1467	} else {
1468		feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_DECEL;
1469	}
1470	spin_unlock_bh(&feci->lock);
1471
1472	/*
1473	 * Ported acceleration ends
1474	 */
1475	spin_lock_bh(&ecm_sfe_ipv6_lock);
1476	ecm_sfe_ported_ipv6_accelerated_count[npci->ported_accelerated_count_index]--;	/* Protocol specific counter */
1477	DEBUG_ASSERT(ecm_sfe_ported_ipv6_accelerated_count[npci->ported_accelerated_count_index] >= 0, "Bad ported accel counter\n");
1478	ecm_sfe_ipv6_accelerated_count--;		/* General running counter */
1479	DEBUG_ASSERT(ecm_sfe_ipv6_accelerated_count >= 0, "Bad accel counter\n");
1480	spin_unlock_bh(&ecm_sfe_ipv6_lock);
1481}
1482
1483/*
1484 * ecm_sfe_ported_ipv6_connection_ref()
1485 *	Ref a connection front end instance
1486 */
1487static void ecm_sfe_ported_ipv6_connection_ref(struct ecm_front_end_connection_instance *feci)
1488{
1489	struct ecm_sfe_ported_ipv6_connection_instance *npci = (struct ecm_sfe_ported_ipv6_connection_instance *)feci;
1490
1491	DEBUG_CHECK_MAGIC(npci, ECM_SFE_PORTED_IPV6_CONNECTION_INSTANCE_MAGIC, "%p: magic failed", npci);
1492	spin_lock_bh(&feci->lock);
1493	feci->refs++;
1494	DEBUG_TRACE("%p: npci ref %d\n", npci, feci->refs);
1495	DEBUG_ASSERT(feci->refs > 0, "%p: ref wrap\n", npci);
1496	spin_unlock_bh(&feci->lock);
1497}
1498
1499/*
1500 * ecm_sfe_ported_ipv6_connection_deref()
1501 *	Deref a connection front end instance
1502 */
1503static int ecm_sfe_ported_ipv6_connection_deref(struct ecm_front_end_connection_instance *feci)
1504{
1505	struct ecm_sfe_ported_ipv6_connection_instance *npci = (struct ecm_sfe_ported_ipv6_connection_instance *)feci;
1506
1507	DEBUG_CHECK_MAGIC(npci, ECM_SFE_PORTED_IPV6_CONNECTION_INSTANCE_MAGIC, "%p: magic failed", npci);
1508
1509	spin_lock_bh(&feci->lock);
1510	feci->refs--;
1511	DEBUG_ASSERT(feci->refs >= 0, "%p: ref wrap\n", npci);
1512
1513	if (feci->refs > 0) {
1514		int refs = feci->refs;
1515		spin_unlock_bh(&feci->lock);
1516		DEBUG_TRACE("%p: npci deref %d\n", npci, refs);
1517		return refs;
1518	}
1519	spin_unlock_bh(&feci->lock);
1520
1521	/*
1522	 * We can now destroy the instance
1523	 */
1524	DEBUG_TRACE("%p: npci final\n", npci);
1525	DEBUG_CLEAR_MAGIC(npci);
1526	kfree(npci);
1527
1528	return 0;
1529}
1530
1531#ifdef ECM_STATE_OUTPUT_ENABLE
1532/*
1533 * ecm_sfe_ported_ipv6_connection_state_get()
1534 *	Return the state of this ported front end instance
1535 */
1536static int ecm_sfe_ported_ipv6_connection_state_get(struct ecm_front_end_connection_instance *feci, struct ecm_state_file_instance *sfi)
1537{
1538	int result;
1539	bool can_accel;
1540	ecm_front_end_acceleration_mode_t accel_mode;
1541	struct ecm_front_end_connection_mode_stats stats;
1542	struct ecm_sfe_ported_ipv6_connection_instance *npci = (struct ecm_sfe_ported_ipv6_connection_instance *)feci;
1543
1544	DEBUG_CHECK_MAGIC(npci, ECM_SFE_PORTED_IPV6_CONNECTION_INSTANCE_MAGIC, "%p: magic failed", npci);
1545
1546	spin_lock_bh(&feci->lock);
1547	can_accel = feci->can_accel;
1548	accel_mode = feci->accel_mode;
1549	memcpy(&stats, &feci->stats, sizeof(struct ecm_front_end_connection_mode_stats));
1550	spin_unlock_bh(&feci->lock);
1551
1552	if ((result = ecm_state_prefix_add(sfi, "front_end_v6.ported"))) {
1553		return result;
1554	}
1555
1556	if ((result = ecm_state_write(sfi, "can_accel", "%d", can_accel))) {
1557		return result;
1558	}
1559	if ((result = ecm_state_write(sfi, "accel_mode", "%d", accel_mode))) {
1560		return result;
1561	}
1562	if ((result = ecm_state_write(sfi, "decelerate_pending", "%d", stats.decelerate_pending))) {
1563		return result;
1564	}
1565	if ((result = ecm_state_write(sfi, "flush_happened_total", "%d", stats.flush_happened_total))) {
1566		return result;
1567	}
1568	if ((result = ecm_state_write(sfi, "no_action_seen_total", "%d", stats.no_action_seen_total))) {
1569		return result;
1570	}
1571	if ((result = ecm_state_write(sfi, "no_action_seen", "%d", stats.no_action_seen))) {
1572		return result;
1573	}
1574	if ((result = ecm_state_write(sfi, "no_action_seen_limit", "%d", stats.no_action_seen_limit))) {
1575		return result;
1576	}
1577	if ((result = ecm_state_write(sfi, "driver_fail_total", "%d", stats.driver_fail_total))) {
1578		return result;
1579	}
1580	if ((result = ecm_state_write(sfi, "driver_fail", "%d", stats.driver_fail))) {
1581		return result;
1582	}
1583	if ((result = ecm_state_write(sfi, "driver_fail_limit", "%d", stats.driver_fail_limit))) {
1584		return result;
1585	}
1586	if ((result = ecm_state_write(sfi, "ae_nack_total", "%d", stats.ae_nack_total))) {
1587		return result;
1588	}
1589	if ((result = ecm_state_write(sfi, "ae_nack", "%d", stats.ae_nack))) {
1590		return result;
1591	}
1592	if ((result = ecm_state_write(sfi, "ae_nack_limit", "%d", stats.ae_nack_limit))) {
1593		return result;
1594	}
1595
1596 	return ecm_state_prefix_remove(sfi);
1597}
1598#endif
1599
1600/*
1601 * ecm_sfe_ported_ipv6_connection_instance_alloc()
1602 *	Create a front end instance specific for ported connection
1603 */
1604static struct ecm_sfe_ported_ipv6_connection_instance *ecm_sfe_ported_ipv6_connection_instance_alloc(
1605								struct ecm_db_connection_instance *ci,
1606								int protocol,
1607								bool can_accel)
1608{
1609	struct ecm_sfe_ported_ipv6_connection_instance *npci;
1610	struct ecm_front_end_connection_instance *feci;
1611
1612	npci = (struct ecm_sfe_ported_ipv6_connection_instance *)kzalloc(sizeof(struct ecm_sfe_ported_ipv6_connection_instance), GFP_ATOMIC | __GFP_NOWARN);
1613	if (!npci) {
1614		DEBUG_WARN("Ported Front end alloc failed\n");
1615		return NULL;
1616	}
1617
1618	/*
1619	 * Refs is 1 for the creator of the connection
1620	 */
1621	feci = (struct ecm_front_end_connection_instance *)npci;
1622	feci->refs = 1;
1623	DEBUG_SET_MAGIC(npci, ECM_SFE_PORTED_IPV6_CONNECTION_INSTANCE_MAGIC);
1624	spin_lock_init(&feci->lock);
1625
1626	feci->can_accel = can_accel;
1627	feci->accel_mode = (can_accel) ? ECM_FRONT_END_ACCELERATION_MODE_DECEL : ECM_FRONT_END_ACCELERATION_MODE_FAIL_DENIED;
1628	spin_lock_bh(&ecm_sfe_ipv6_lock);
1629	feci->stats.no_action_seen_limit = ecm_sfe_ipv6_no_action_limit_default;
1630	feci->stats.driver_fail_limit = ecm_sfe_ipv6_driver_fail_limit_default;
1631	feci->stats.ae_nack_limit = ecm_sfe_ipv6_nack_limit_default;
1632	spin_unlock_bh(&ecm_sfe_ipv6_lock);
1633
1634	/*
1635	 * Copy reference to connection - no need to ref ci as ci maintains a ref to this instance instead (this instance persists for as long as ci does)
1636	 */
1637	feci->ci = ci;
1638
1639	/*
1640	 * Populate the methods and callbacks
1641	 */
1642	feci->ref = ecm_sfe_ported_ipv6_connection_ref;
1643	feci->deref = ecm_sfe_ported_ipv6_connection_deref;
1644	feci->decelerate = ecm_sfe_ported_ipv6_connection_decelerate;
1645	feci->accel_state_get = ecm_sfe_ported_ipv6_connection_accel_state_get;
1646	feci->action_seen = ecm_sfe_ported_ipv6_connection_action_seen;
1647	feci->accel_ceased = ecm_sfe_ported_ipv6_connection_accel_ceased;
1648#ifdef ECM_STATE_OUTPUT_ENABLE
1649	feci->state_get = ecm_sfe_ported_ipv6_connection_state_get;
1650#endif
1651	feci->ae_interface_number_by_dev_get = ecm_sfe_common_get_interface_number_by_dev;
1652
1653	if (protocol == IPPROTO_TCP) {
1654		npci->ported_accelerated_count_index = ECM_SFE_PORTED_IPV6_PROTO_TCP;
1655	} else if (protocol == IPPROTO_UDP) {
1656		npci->ported_accelerated_count_index = ECM_SFE_PORTED_IPV6_PROTO_UDP;
1657	} else {
1658		DEBUG_WARN("%p: Wrong protocol: %d\n", npci, protocol);
1659		DEBUG_CLEAR_MAGIC(npci);
1660		kfree(npci);
1661		return NULL;
1662	}
1663
1664	return npci;
1665}
1666
1667/*
1668 * ecm_sfe_ported_ipv6_process()
1669 *	Process a ported packet
1670 */
1671unsigned int ecm_sfe_ported_ipv6_process(struct net_device *out_dev,
1672							struct net_device *in_dev,
1673							uint8_t *src_node_addr,
1674							uint8_t *dest_node_addr,
1675							bool can_accel,  bool is_routed, bool is_l2_encap, struct sk_buff *skb,
1676							struct ecm_tracker_ip_header *iph,
1677							struct nf_conn *ct, ecm_tracker_sender_type_t sender, ecm_db_direction_t ecm_dir,
1678							struct nf_conntrack_tuple *orig_tuple, struct nf_conntrack_tuple *reply_tuple,
1679							ip_addr_t ip_src_addr, ip_addr_t ip_dest_addr)
1680{
1681	struct tcphdr *tcp_hdr;
1682	struct tcphdr tcp_hdr_buff;
1683	struct udphdr *udp_hdr;
1684	struct udphdr udp_hdr_buff;
1685	int src_port;
1686	int dest_port;
1687	struct ecm_db_connection_instance *ci;
1688	ip_addr_t match_addr;
1689	struct ecm_classifier_instance *assignments[ECM_CLASSIFIER_TYPES];
1690	int aci_index;
1691	int assignment_count;
1692	ecm_db_timer_group_t ci_orig_timer_group;
1693	struct ecm_classifier_process_response prevalent_pr;
1694	int protocol = (int)orig_tuple->dst.protonum;
1695
1696	if (protocol == IPPROTO_TCP) {
1697		/*
1698		 * Extract TCP header to obtain port information
1699		 */
1700		tcp_hdr = ecm_tracker_tcp_check_header_and_read(skb, iph, &tcp_hdr_buff);
1701		if (unlikely(!tcp_hdr)) {
1702			DEBUG_WARN("TCP packet header %p\n", skb);
1703			return NF_ACCEPT;
1704		}
1705
1706		/*
1707		 * Now extract information, if we have conntrack then use that (which would already be in the tuples)
1708		 */
1709		if (unlikely(!ct)) {
1710			orig_tuple->src.u.tcp.port = tcp_hdr->source;
1711			orig_tuple->dst.u.tcp.port = tcp_hdr->dest;
1712			reply_tuple->src.u.tcp.port = tcp_hdr->dest;
1713			reply_tuple->dst.u.tcp.port = tcp_hdr->source;
1714		}
1715
1716		/*
1717		 * Extract transport port information
1718		 * Refer to the ecm_sfe_ipv6_process() for information on how we extract this information.
1719		 */
1720		if (sender == ECM_TRACKER_SENDER_TYPE_SRC) {
1721			switch(ecm_dir) {
1722			case ECM_DB_DIRECTION_NON_NAT:
1723			case ECM_DB_DIRECTION_BRIDGED:
1724				src_port = ntohs(orig_tuple->src.u.tcp.port);
1725				dest_port = ntohs(orig_tuple->dst.u.tcp.port);
1726				break;
1727			default:
1728				DEBUG_ASSERT(false, "Unhandled ecm_dir: %d\n", ecm_dir);
1729			}
1730		} else {
1731			switch(ecm_dir) {
1732			case ECM_DB_DIRECTION_NON_NAT:
1733			case ECM_DB_DIRECTION_BRIDGED:
1734				dest_port = ntohs(orig_tuple->src.u.tcp.port);
1735				src_port = ntohs(orig_tuple->dst.u.tcp.port);
1736				break;
1737			default:
1738				DEBUG_ASSERT(false, "Unhandled ecm_dir: %d\n", ecm_dir);
1739			}
1740		}
1741
1742		DEBUG_TRACE("TCP src: " ECM_IP_ADDR_OCTAL_FMT ":%d, dest: " ECM_IP_ADDR_OCTAL_FMT ":%d, dir %d\n",
1743				ECM_IP_ADDR_TO_OCTAL(ip_src_addr), src_port, ECM_IP_ADDR_TO_OCTAL(ip_dest_addr), dest_port, ecm_dir);
1744	} else if (protocol == IPPROTO_UDP) {
1745		/*
1746		 * Extract UDP header to obtain port information
1747		 */
1748		udp_hdr = ecm_tracker_udp_check_header_and_read(skb, iph, &udp_hdr_buff);
1749		if (unlikely(!udp_hdr)) {
1750			DEBUG_WARN("Invalid UDP header in skb %p\n", skb);
1751			return NF_ACCEPT;
1752		}
1753
1754		/*
1755		 * Deny acceleration for L2TP-over-UDP tunnel
1756		 */
1757		if (skb->sk) {
1758			if(skb->sk->sk_protocol == IPPROTO_UDP) {
1759				struct udp_sock *usk = udp_sk(skb->sk);
1760				if (usk) {
1761					if (unlikely(usk->encap_type == UDP_ENCAP_L2TPINUDP)) {
1762						DEBUG_TRACE("Skip packets for L2TP tunnel in skb %p\n", skb);
1763						can_accel = false;
1764					}
1765				}
1766			}
1767		}
1768
1769		/*
1770		 * Now extract information, if we have conntrack then use that (which would already be in the tuples)
1771		 */
1772		if (unlikely(!ct)) {
1773			orig_tuple->src.u.udp.port = udp_hdr->source;
1774			orig_tuple->dst.u.udp.port = udp_hdr->dest;
1775			reply_tuple->src.u.udp.port = udp_hdr->dest;
1776			reply_tuple->dst.u.udp.port = udp_hdr->source;
1777		}
1778
1779		/*
1780		 * Extract transport port information
1781		 * Refer to the ecm_sfe_ipv6_process() for information on how we extract this information.
1782		 */
1783		if (sender == ECM_TRACKER_SENDER_TYPE_SRC) {
1784			switch(ecm_dir) {
1785			case ECM_DB_DIRECTION_NON_NAT:
1786			case ECM_DB_DIRECTION_BRIDGED:
1787				src_port = ntohs(orig_tuple->src.u.udp.port);
1788				dest_port = ntohs(orig_tuple->dst.u.udp.port);
1789				break;
1790			default:
1791				DEBUG_ASSERT(false, "Unhandled ecm_dir: %d\n", ecm_dir);
1792			}
1793		} else {
1794			switch(ecm_dir) {
1795			case ECM_DB_DIRECTION_NON_NAT:
1796			case ECM_DB_DIRECTION_BRIDGED:
1797				dest_port = ntohs(orig_tuple->src.u.udp.port);
1798				src_port = ntohs(orig_tuple->dst.u.udp.port);
1799				break;
1800			default:
1801				DEBUG_ASSERT(false, "Unhandled ecm_dir: %d\n", ecm_dir);
1802			}
1803		}
1804		DEBUG_TRACE("UDP src: " ECM_IP_ADDR_OCTAL_FMT ":%d, dest: " ECM_IP_ADDR_OCTAL_FMT ":%d, dir %d\n",
1805				ECM_IP_ADDR_TO_OCTAL(ip_src_addr), src_port, ECM_IP_ADDR_TO_OCTAL(ip_dest_addr), dest_port, ecm_dir);
1806	} else {
1807		DEBUG_WARN("Wrong protocol: %d\n", protocol);
1808		return NF_ACCEPT;
1809	}
1810
1811	/*
1812	 * Look up a connection
1813	 */
1814	ci = ecm_db_connection_find_and_ref(ip_src_addr, ip_dest_addr, protocol, src_port, dest_port);
1815
1816	/*
1817	 * If there is no existing connection then create a new one.
1818	 */
1819	if (unlikely(!ci)) {
1820		struct ecm_db_mapping_instance *src_mi;
1821		struct ecm_db_mapping_instance *dest_mi;
1822		struct ecm_db_node_instance *src_ni;
1823		struct ecm_db_node_instance *dest_ni;
1824		struct ecm_classifier_default_instance *dci;
1825		struct ecm_db_connection_instance *nci;
1826		ecm_classifier_type_t classifier_type;
1827		struct ecm_front_end_connection_instance *feci;
1828		int32_t to_list_first;
1829		struct ecm_db_iface_instance *to_list[ECM_DB_IFACE_HEIRARCHY_MAX];
1830		int32_t from_list_first;
1831		struct ecm_db_iface_instance *from_list[ECM_DB_IFACE_HEIRARCHY_MAX];
1832
1833		DEBUG_INFO("New Ported connection from " ECM_IP_ADDR_OCTAL_FMT ":%u to " ECM_IP_ADDR_OCTAL_FMT ":%u\n",
1834				ECM_IP_ADDR_TO_OCTAL(ip_src_addr), src_port, ECM_IP_ADDR_TO_OCTAL(ip_dest_addr), dest_port);
1835
1836		/*
1837		 * Before we attempt to create the connection are we being terminated?
1838		 */
1839		spin_lock_bh(&ecm_sfe_ipv6_lock);
1840		if (ecm_sfe_ipv6_terminate_pending) {
1841			spin_unlock_bh(&ecm_sfe_ipv6_lock);
1842			DEBUG_WARN("Terminating\n");
1843
1844			/*
1845			 * As we are terminating we just allow the packet to pass - it's no longer our concern
1846			 */
1847			return NF_ACCEPT;
1848		}
1849		spin_unlock_bh(&ecm_sfe_ipv6_lock);
1850
1851		/*
1852		 * Does this connection have a conntrack entry?
1853		 */
1854		if (ct) {
1855			unsigned int conn_count;
1856
1857			/*
1858			 * If we have exceeded the connection limit (according to conntrack) then abort
1859			 * NOTE: Conntrack, when at its limit, will destroy a connection to make way for a new.
1860			 * Conntrack won't exceed its limit but ECM can due to it needing to hold connections while
1861			 * acceleration commands are in-flight.
1862			 * This means that ECM can 'fall behind' somewhat with the connection state wrt conntrack connection state.
1863			 * This is not seen as an issue since conntrack will have issued us with a destroy event for the flushed connection(s)
1864			 * and we will eventually catch up.
1865			 * Since ECM is capable of handling connections mid-flow ECM will pick up where it can.
1866			 */
1867			conn_count = (unsigned int)ecm_db_connection_count_get();
1868			if (conn_count >= nf_conntrack_max) {
1869				DEBUG_WARN("ECM Connection count limit reached: db: %u, ct: %u\n", conn_count, nf_conntrack_max);
1870				return NF_ACCEPT;
1871			}
1872
1873			if (protocol == IPPROTO_TCP) {
1874				/*
1875				 * No point in establishing a connection for one that is closing
1876				 */
1877				spin_lock_bh(&ct->lock);
1878				if (ct->proto.tcp.state >= TCP_CONNTRACK_FIN_WAIT && ct->proto.tcp.state <= TCP_CONNTRACK_CLOSE) {
1879					spin_unlock_bh(&ct->lock);
1880					DEBUG_TRACE("%p: Connection in termination state %#X\n", ct, ct->proto.tcp.state);
1881					return NF_ACCEPT;
1882				}
1883				spin_unlock_bh(&ct->lock);
1884			}
1885		}
1886
1887		/*
1888		 * Now allocate the new connection
1889		 */
1890		nci = ecm_db_connection_alloc();
1891		if (!nci) {
1892			DEBUG_WARN("Failed to allocate connection\n");
1893			return NF_ACCEPT;
1894		}
1895
1896		/*
1897		 * Connection must have a front end instance associated with it
1898		 */
1899		feci = (struct ecm_front_end_connection_instance *)ecm_sfe_ported_ipv6_connection_instance_alloc(nci, protocol, can_accel);
1900		if (!feci) {
1901			ecm_db_connection_deref(nci);
1902			DEBUG_WARN("Failed to allocate front end\n");
1903			return NF_ACCEPT;
1904		}
1905
1906		/*
1907		 * Get the src and destination mappings
1908		 * For this we also need the interface lists which we also set upon the new connection while we are at it.
1909		 * GGG TODO rework terms of "src/dest" - these need to be named consistently as from/to as per database terms.
1910		 * GGG TODO The empty list checks should not be needed, mapping_establish_and_ref() should fail out if there is no list anyway.
1911		 */
1912		DEBUG_TRACE("%p: Create the 'from' interface heirarchy list\n", nci);
1913		from_list_first = ecm_interface_heirarchy_construct(feci, from_list, ip_dest_addr, ip_src_addr, 6, protocol, in_dev, is_routed, in_dev, src_node_addr, dest_node_addr, NULL);
1914		if (from_list_first == ECM_DB_IFACE_HEIRARCHY_MAX) {
1915			feci->deref(feci);
1916			ecm_db_connection_deref(nci);
1917			DEBUG_WARN("Failed to obtain 'from' heirarchy list\n");
1918			return NF_ACCEPT;
1919		}
1920		ecm_db_connection_from_interfaces_reset(nci, from_list, from_list_first);
1921
1922		DEBUG_TRACE("%p: Create source node\n", nci);
1923		src_ni = ecm_sfe_ipv6_node_establish_and_ref(feci, in_dev, ip_src_addr, from_list, from_list_first, src_node_addr);
1924		ecm_db_connection_interfaces_deref(from_list, from_list_first);
1925		if (!src_ni) {
1926			feci->deref(feci);
1927			ecm_db_connection_deref(nci);
1928			DEBUG_WARN("Failed to establish source node\n");
1929			return NF_ACCEPT;
1930		}
1931
1932		DEBUG_TRACE("%p: Create source mapping\n", nci);
1933		src_mi = ecm_sfe_ipv6_mapping_establish_and_ref(ip_src_addr, src_port);
1934		if (!src_mi) {
1935			ecm_db_node_deref(src_ni);
1936			feci->deref(feci);
1937			ecm_db_connection_deref(nci);
1938			DEBUG_WARN("Failed to establish src mapping\n");
1939			return NF_ACCEPT;
1940		}
1941
1942		DEBUG_TRACE("%p: Create the 'to' interface heirarchy list\n", nci);
1943		to_list_first = ecm_interface_heirarchy_construct(feci, to_list, ip_src_addr, ip_dest_addr, 6, protocol, out_dev, is_routed, in_dev, dest_node_addr, src_node_addr, NULL);
1944		if (to_list_first == ECM_DB_IFACE_HEIRARCHY_MAX) {
1945			ecm_db_mapping_deref(src_mi);
1946			ecm_db_node_deref(src_ni);
1947			feci->deref(feci);
1948			ecm_db_connection_deref(nci);
1949			DEBUG_WARN("Failed to obtain 'to' heirarchy list\n");
1950			return NF_ACCEPT;
1951		}
1952		ecm_db_connection_to_interfaces_reset(nci, to_list, to_list_first);
1953
1954		DEBUG_TRACE("%p: Create dest node\n", nci);
1955		dest_ni = ecm_sfe_ipv6_node_establish_and_ref(feci, out_dev, ip_dest_addr, to_list, to_list_first, dest_node_addr);
1956		ecm_db_connection_interfaces_deref(to_list, to_list_first);
1957		if (!dest_ni) {
1958			ecm_db_mapping_deref(src_mi);
1959			ecm_db_node_deref(src_ni);
1960			feci->deref(feci);
1961			ecm_db_connection_deref(nci);
1962			DEBUG_WARN("Failed to establish dest node\n");
1963			return NF_ACCEPT;
1964		}
1965
1966		DEBUG_TRACE("%p: Create dest mapping\n", nci);
1967		dest_mi = ecm_sfe_ipv6_mapping_establish_and_ref(ip_dest_addr, dest_port);
1968		if (!dest_mi) {
1969			ecm_db_node_deref(dest_ni);
1970			ecm_db_mapping_deref(src_mi);
1971			ecm_db_node_deref(src_ni);
1972			feci->deref(feci);
1973			ecm_db_connection_deref(nci);
1974			DEBUG_WARN("Failed to establish dest mapping\n");
1975			return NF_ACCEPT;
1976		}
1977
1978		/*
1979		 * Every connection also needs a default classifier which is considered 'special'
1980		 */
1981		dci = ecm_classifier_default_instance_alloc(nci, protocol, ecm_dir, src_port, dest_port);
1982		if (!dci) {
1983			ecm_db_mapping_deref(dest_mi);
1984			ecm_db_node_deref(dest_ni);
1985			ecm_db_mapping_deref(src_mi);
1986			ecm_db_node_deref(src_ni);
1987			feci->deref(feci);
1988			ecm_db_connection_deref(nci);
1989			DEBUG_WARN("Failed to allocate default classifier\n");
1990			return NF_ACCEPT;
1991		}
1992		ecm_db_connection_classifier_assign(nci, (struct ecm_classifier_instance *)dci);
1993
1994		/*
1995		 * Every connection starts with a full complement of classifiers assigned.
1996		 * NOTE: Default classifier is a special case considered previously
1997		 */
1998		for (classifier_type = ECM_CLASSIFIER_TYPE_DEFAULT + 1; classifier_type < ECM_CLASSIFIER_TYPES; ++classifier_type) {
1999			struct ecm_classifier_instance *aci = ecm_sfe_ipv6_assign_classifier(nci, classifier_type);
2000			if (aci) {
2001				aci->deref(aci);
2002			} else {
2003				dci->base.deref((struct ecm_classifier_instance *)dci);
2004				ecm_db_mapping_deref(dest_mi);
2005				ecm_db_node_deref(dest_ni);
2006				ecm_db_mapping_deref(src_mi);
2007				ecm_db_node_deref(src_ni);
2008				feci->deref(feci);
2009				ecm_db_connection_deref(nci);
2010				DEBUG_WARN("Failed to allocate classifiers assignments\n");
2011				return NF_ACCEPT;
2012			}
2013		}
2014
2015		/*
2016		 * Now add the connection into the database.
2017		 * NOTE: In an SMP situation such as ours there is a possibility that more than one packet for the same
2018		 * connection is being processed simultaneously.
2019		 * We *could* end up creating more than one connection instance for the same actual connection.
2020		 * To guard against this we now perform a mutex'd lookup of the connection + add once more - another cpu may have created it before us.
2021		 */
2022		spin_lock_bh(&ecm_sfe_ipv6_lock);
2023		ci = ecm_db_connection_find_and_ref(ip_src_addr, ip_dest_addr, protocol, src_port, dest_port);
2024		if (ci) {
2025			/*
2026			 * Another cpu created the same connection before us - use the one we just found
2027			 */
2028			spin_unlock_bh(&ecm_sfe_ipv6_lock);
2029			ecm_db_connection_deref(nci);
2030		} else {
2031			ecm_db_timer_group_t tg;
2032			ecm_tracker_sender_state_t src_state;
2033			ecm_tracker_sender_state_t dest_state;
2034			ecm_tracker_connection_state_t state;
2035			struct ecm_tracker_instance *ti;
2036
2037			/*
2038			 * Ask tracker for timer group to set the connection to initially.
2039			 */
2040			ti = dci->tracker_get_and_ref(dci);
2041			ti->state_get(ti, &src_state, &dest_state, &state, &tg);
2042			ti->deref(ti);
2043
2044			/*
2045			 * Add the new connection we created into the database
2046			 * NOTE: assign to a short timer group for now - it is the assigned classifiers responsibility to do this
2047			 */
2048			ecm_db_connection_add(nci, feci, src_mi, dest_mi, src_mi, dest_mi,
2049					src_ni, dest_ni, src_ni, dest_ni,
2050					6, protocol, ecm_dir,
2051					NULL /* final callback */,
2052					ecm_sfe_ported_ipv6_connection_defunct_callback,
2053					tg, is_routed, nci);
2054
2055			spin_unlock_bh(&ecm_sfe_ipv6_lock);
2056
2057			ci = nci;
2058			DEBUG_INFO("%p: New ported connection created\n", ci);
2059		}
2060
2061		/*
2062		 * No longer need referenecs to the objects we created
2063		 */
2064		dci->base.deref((struct ecm_classifier_instance *)dci);
2065		ecm_db_mapping_deref(dest_mi);
2066		ecm_db_node_deref(dest_ni);
2067		ecm_db_mapping_deref(src_mi);
2068		ecm_db_node_deref(src_ni);
2069		feci->deref(feci);
2070	}
2071
2072	/*
2073	 * Keep connection alive as we have seen activity
2074	 */
2075	if (!ecm_db_connection_defunct_timer_touch(ci)) {
2076		ecm_db_connection_deref(ci);
2077		return NF_ACCEPT;
2078	}
2079
2080	/*
2081	 * Identify which side of the connection is sending
2082	 * NOTE: This may be different than what sender is at the moment
2083	 * given the connection we have located.
2084	 */
2085	ecm_db_connection_from_address_get(ci, match_addr);
2086	if (ECM_IP_ADDR_MATCH(ip_src_addr, match_addr)) {
2087		sender = ECM_TRACKER_SENDER_TYPE_SRC;
2088	} else {
2089		sender = ECM_TRACKER_SENDER_TYPE_DEST;
2090	}
2091
2092	/*
2093	 * Do we need to action generation change?
2094	 */
2095	if (unlikely(ecm_db_connection_regeneration_required_check(ci))) {
2096		ecm_sfe_ipv6_connection_regenerate(ci, sender, out_dev, in_dev);
2097	}
2098
2099	/*
2100	 * Iterate the assignments and call to process!
2101	 * Policy implemented:
2102	 * 1. Classifiers that say they are not relevant are unassigned and not actioned further.
2103	 * 2. Any drop command from any classifier is honoured.
2104	 * 3. All classifiers must action acceleration for accel to be honoured, any classifiers not sure of their relevance will stop acceleration.
2105	 * 4. Only the highest priority classifier, that actions it, will have its qos tag honoured.
2106	 * 5. Only the highest priority classifier, that actions it, will have its timer group honoured.
2107	 */
2108	DEBUG_TRACE("%p: process begin, skb: %p\n", ci, skb);
2109	prevalent_pr.process_actions = 0;
2110	prevalent_pr.drop = false;
2111	prevalent_pr.flow_qos_tag = skb->priority;
2112	prevalent_pr.return_qos_tag = skb->priority;
2113	prevalent_pr.accel_mode = ECM_CLASSIFIER_ACCELERATION_MODE_ACCEL;
2114	prevalent_pr.timer_group = ci_orig_timer_group = ecm_db_connection_timer_group_get(ci);
2115
2116	assignment_count = ecm_db_connection_classifier_assignments_get_and_ref(ci, assignments);
2117	for (aci_index = 0; aci_index < assignment_count; ++aci_index) {
2118		struct ecm_classifier_process_response aci_pr;
2119		struct ecm_classifier_instance *aci;
2120
2121		aci = assignments[aci_index];
2122		DEBUG_TRACE("%p: process: %p, type: %d\n", ci, aci, aci->type_get(aci));
2123		aci->process(aci, sender, iph, skb, &aci_pr);
2124		DEBUG_TRACE("%p: aci_pr: process actions: %x, became relevant: %u, relevance: %d, drop: %d, "
2125				"flow_qos_tag: %u, return_qos_tag: %u, accel_mode: %x, timer_group: %d\n",
2126				ci, aci_pr.process_actions, aci_pr.became_relevant, aci_pr.relevance, aci_pr.drop,
2127				aci_pr.flow_qos_tag, aci_pr.return_qos_tag, aci_pr.accel_mode, aci_pr.timer_group);
2128
2129		if (aci_pr.relevance == ECM_CLASSIFIER_RELEVANCE_NO) {
2130			ecm_classifier_type_t aci_type;
2131
2132			/*
2133			 * This classifier can be unassigned - PROVIDED it is not the default classifier
2134			 */
2135			aci_type = aci->type_get(aci);
2136			if (aci_type == ECM_CLASSIFIER_TYPE_DEFAULT) {
2137				continue;
2138			}
2139
2140			DEBUG_INFO("%p: Classifier not relevant, unassign: %d", ci, aci_type);
2141			ecm_db_connection_classifier_unassign(ci, aci);
2142			continue;
2143		}
2144
2145		/*
2146		 * Yes or Maybe relevant.
2147		 */
2148		if (aci_pr.process_actions & ECM_CLASSIFIER_PROCESS_ACTION_DROP) {
2149			/*
2150			 * Drop command from any classifier is actioned.
2151			 */
2152			DEBUG_TRACE("%p: wants drop: %p, type: %d, skb: %p\n", ci, aci, aci->type_get(aci), skb);
2153			prevalent_pr.drop |= aci_pr.drop;
2154		}
2155
2156		/*
2157		 * Accel mode permission
2158		 */
2159		if (aci_pr.relevance == ECM_CLASSIFIER_RELEVANCE_MAYBE) {
2160			/*
2161			 * Classifier not sure of its relevance - cannot accel yet
2162			 */
2163			DEBUG_TRACE("%p: accel denied by maybe: %p, type: %d\n", ci, aci, aci->type_get(aci));
2164			prevalent_pr.accel_mode = ECM_CLASSIFIER_ACCELERATION_MODE_NO;
2165		} else {
2166			if (aci_pr.process_actions & ECM_CLASSIFIER_PROCESS_ACTION_ACCEL_MODE) {
2167				if (aci_pr.accel_mode == ECM_CLASSIFIER_ACCELERATION_MODE_NO) {
2168					DEBUG_TRACE("%p: accel denied: %p, type: %d\n", ci, aci, aci->type_get(aci));
2169					prevalent_pr.accel_mode = ECM_CLASSIFIER_ACCELERATION_MODE_NO;
2170				}
2171				/* else yes or don't care about accel */
2172			}
2173		}
2174
2175		/*
2176		 * Timer group (the last classifier i.e. the highest priority one) will 'win'
2177		 */
2178		if (aci_pr.process_actions & ECM_CLASSIFIER_PROCESS_ACTION_TIMER_GROUP) {
2179			DEBUG_TRACE("%p: timer group: %p, type: %d, group: %d\n", ci, aci, aci->type_get(aci), aci_pr.timer_group);
2180			prevalent_pr.timer_group = aci_pr.timer_group;
2181		}
2182
2183		/*
2184		 * Qos tag (the last classifier i.e. the highest priority one) will 'win'
2185		 */
2186		if (aci_pr.process_actions & ECM_CLASSIFIER_PROCESS_ACTION_QOS_TAG) {
2187			DEBUG_TRACE("%p: aci: %p, type: %d, flow qos tag: %u, return qos tag: %u\n",
2188					ci, aci, aci->type_get(aci), aci_pr.flow_qos_tag, aci_pr.return_qos_tag);
2189			prevalent_pr.flow_qos_tag = aci_pr.flow_qos_tag;
2190			prevalent_pr.return_qos_tag = aci_pr.return_qos_tag;
2191		}
2192
2193#ifdef ECM_CLASSIFIER_DSCP_ENABLE
2194		/*
2195		 * If any classifier denied DSCP remarking then that overrides every classifier
2196		 */
2197		if (aci_pr.process_actions & ECM_CLASSIFIER_PROCESS_ACTION_DSCP_DENY) {
2198			DEBUG_TRACE("%p: aci: %p, type: %d, DSCP remark denied\n",
2199					ci, aci, aci->type_get(aci));
2200			prevalent_pr.process_actions |= ECM_CLASSIFIER_PROCESS_ACTION_DSCP_DENY;
2201			prevalent_pr.process_actions &= ~ECM_CLASSIFIER_PROCESS_ACTION_DSCP;
2202		}
2203
2204		/*
2205		 * DSCP remark action, but only if it has not been denied by any classifier
2206		 */
2207		if (aci_pr.process_actions & ECM_CLASSIFIER_PROCESS_ACTION_DSCP) {
2208			if (!(prevalent_pr.process_actions & ECM_CLASSIFIER_PROCESS_ACTION_DSCP_DENY)) {
2209				DEBUG_TRACE("%p: aci: %p, type: %d, DSCP remark wanted, flow_dscp: %u, return dscp: %u\n",
2210						ci, aci, aci->type_get(aci), aci_pr.flow_dscp, aci_pr.return_dscp);
2211				prevalent_pr.process_actions |= ECM_CLASSIFIER_PROCESS_ACTION_DSCP;
2212				prevalent_pr.flow_dscp = aci_pr.flow_dscp;
2213				prevalent_pr.return_dscp = aci_pr.return_dscp;
2214			}
2215		}
2216#endif
2217	}
2218	ecm_db_connection_assignments_release(assignment_count, assignments);
2219
2220	/*
2221	 * Change timer group?
2222	 */
2223	if (ci_orig_timer_group != prevalent_pr.timer_group) {
2224		DEBUG_TRACE("%p: change timer group from: %d to: %d\n", ci, ci_orig_timer_group, prevalent_pr.timer_group);
2225		ecm_db_connection_defunct_timer_reset(ci, prevalent_pr.timer_group);
2226	}
2227
2228	/*
2229	 * Drop?
2230	 */
2231	if (prevalent_pr.drop) {
2232		DEBUG_TRACE("%p: drop: %p\n", ci, skb);
2233		ecm_db_connection_data_totals_update_dropped(ci, (sender == ECM_TRACKER_SENDER_TYPE_SRC)? true : false, skb->len, 1);
2234		ecm_db_connection_deref(ci);
2235		return NF_ACCEPT;
2236	}
2237	ecm_db_connection_data_totals_update(ci, (sender == ECM_TRACKER_SENDER_TYPE_SRC)? true : false, skb->len, 1);
2238
2239	/*
2240	 * Assign qos tag
2241	 * GGG TODO Should we use sender to identify whether to use flow or return qos tag?
2242	 */
2243	skb->priority = prevalent_pr.flow_qos_tag;
2244	DEBUG_TRACE("%p: skb priority: %u\n", ci, skb->priority);
2245
2246	/*
2247	 * Accelerate?
2248	 */
2249	if (prevalent_pr.accel_mode == ECM_CLASSIFIER_ACCELERATION_MODE_ACCEL) {
2250		struct ecm_front_end_connection_instance *feci;
2251		DEBUG_TRACE("%p: accel\n", ci);
2252		feci = ecm_db_connection_front_end_get_and_ref(ci);
2253		ecm_sfe_ported_ipv6_connection_accelerate(feci, &prevalent_pr, ct, is_l2_encap);
2254		feci->deref(feci);
2255	}
2256	ecm_db_connection_deref(ci);
2257
2258	return NF_ACCEPT;
2259}
2260
2261/*
2262 * ecm_sfe_ported_ipv6_debugfs_init()
2263 */
2264bool ecm_sfe_ported_ipv6_debugfs_init(struct dentry *dentry)
2265{
2266	struct dentry *udp_dentry;
2267
2268	udp_dentry = debugfs_create_u32("udp_accelerated_count", S_IRUGO, dentry,
2269						&ecm_sfe_ported_ipv6_accelerated_count[ECM_SFE_PORTED_IPV6_PROTO_UDP]);
2270	if (!udp_dentry) {
2271		DEBUG_ERROR("Failed to create ecm sfe ipv6 udp_accelerated_count file in debugfs\n");
2272		return false;
2273	}
2274
2275	if (!debugfs_create_u32("tcp_accelerated_count", S_IRUGO, dentry,
2276					&ecm_sfe_ported_ipv6_accelerated_count[ECM_SFE_PORTED_IPV6_PROTO_TCP])) {
2277		DEBUG_ERROR("Failed to create ecm sfe ipv6 tcp_accelerated_count file in debugfs\n");
2278		debugfs_remove(udp_dentry);
2279		return false;
2280	}
2281
2282	return true;
2283}
2284
2285