/*
 **************************************************************************
 * Copyright (c) 2015 The Linux Foundation. All rights reserved.
 * Permission to use, copy, modify, and/or distribute this software for
 * any purpose with or without fee is hereby granted, provided that the
 * above copyright notice and this permission notice appear in all copies.
 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
 * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
 **************************************************************************
 */

#include <linux/version.h>
#include <linux/types.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/icmp.h>
#include <linux/debugfs.h>
#include <linux/kthread.h>
#include <linux/pkt_sched.h>
#include <linux/string.h>
#include <net/ip6_route.h>
#include <net/ip6_fib.h>
#include <net/addrconf.h>
#include <net/ipv6.h>
#include <net/tcp.h>
#include <asm/unaligned.h>
#include <asm/uaccess.h>	/* for put_user */
#include <net/ipv6.h>
#include <linux/inet.h>
#include <linux/in6.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/inetdevice.h>
#include <linux/if_arp.h>
#include <linux/netfilter_ipv6.h>
#include <linux/netfilter_bridge.h>
#include <linux/if_bridge.h>
#include <net/arp.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_l3proto.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
#ifdef ECM_INTERFACE_VLAN_ENABLE
#include <linux/../../net/8021q/vlan.h>
#include <linux/if_vlan.h>
#endif

/*
 * Debug output levels
 * 0 = OFF
 * 1 = ASSERTS / ERRORS
 * 2 = 1 + WARN
 * 3 = 2 + INFO
 * 4 = 3 + TRACE
 */
#define DEBUG_LEVEL ECM_SFE_NON_PORTED_IPV6_DEBUG_LEVEL

#include <sfe_drv.h>

#include "ecm_types.h"
#include "ecm_db_types.h"
#include "ecm_state.h"
#include "ecm_tracker.h"
#include "ecm_classifier.h"
#include "ecm_front_end_types.h"
#include "ecm_tracker_datagram.h"
#include "ecm_tracker_udp.h"
#include "ecm_tracker_tcp.h"
#include "ecm_db.h"
#include "ecm_classifier_default.h"
#include "ecm_interface.h"
#include "ecm_sfe_non_ported_ipv6.h"
#include "ecm_sfe_ipv6.h"
#include "ecm_sfe_common.h"

/*
 * Magic numbers
 * Used by DEBUG_CHECK_MAGIC() to validate that a front end instance really is
 * a non-ported IPv6 connection instance before it is used via a cast.
 */
#define ECM_SFE_NON_PORTED_IPV6_CONNECTION_INSTANCE_MAGIC 0xECBC

/*
 * struct ecm_sfe_non_ported_ipv6_connection_instance
 *	A connection specific front end instance for Non-Ported connections
 *
 * The base member MUST remain first: callers hold a
 * struct ecm_front_end_connection_instance pointer and this file down-casts it.
 */
struct ecm_sfe_non_ported_ipv6_connection_instance {
	struct ecm_front_end_connection_instance base;		/* Base class */
#if (DEBUG_LEVEL > 0)
	uint16_t magic;						/* Instance sanity marker, only present in debug builds */
#endif
};

static int ecm_sfe_non_ported_ipv6_accelerated_count = 0;	/* Number of Non-Ported connections currently offloaded */

/*
 * ecm_sfe_non_ported_ipv6_connection_callback()
 *	Callback for handling create ack/nack calls.
109 */ 110static void ecm_sfe_non_ported_ipv6_connection_callback(void *app_data, struct sfe_ipv6_msg *nim) 111{ 112 struct sfe_ipv6_rule_create_msg *nircm = &nim->msg.rule_create; 113 uint32_t serial = (uint32_t)app_data; 114 struct ecm_db_connection_instance *ci; 115 struct ecm_front_end_connection_instance *feci; 116 struct ecm_sfe_non_ported_ipv6_connection_instance *nnpci; 117 ip_addr_t flow_ip; 118 ip_addr_t return_ip; 119 ecm_front_end_acceleration_mode_t result_mode; 120 121 /* 122 * Is this a response to a create message? 123 */ 124 if (nim->cm.type != SFE_TX_CREATE_RULE_MSG) { 125 DEBUG_ERROR("%p: non_ported create callback with improper type: %d, serial: %u\n", nim, nim->cm.type, serial); 126 return; 127 } 128 129 /* 130 * Look up ecm connection so that we can update the status. 131 */ 132 ci = ecm_db_connection_serial_find_and_ref(serial); 133 if (!ci) { 134 DEBUG_TRACE("%p: create callback, connection not found, serial: %u\n", nim, serial); 135 return; 136 } 137 138 /* 139 * Release ref held for this ack/nack response. 140 * NOTE: It's okay to do this here, ci won't go away, because the ci is held as 141 * a result of the ecm_db_connection_serial_find_and_ref() 142 */ 143 ecm_db_connection_deref(ci); 144 145 /* 146 * Get the front end instance 147 */ 148 feci = ecm_db_connection_front_end_get_and_ref(ci); 149 nnpci = (struct ecm_sfe_non_ported_ipv6_connection_instance *)feci; 150 DEBUG_CHECK_MAGIC(nnpci, ECM_SFE_NON_PORTED_IPV6_CONNECTION_INSTANCE_MAGIC, "%p: magic failed", nnpci); 151 152 ECM_SFE_IPV6_ADDR_TO_IP_ADDR(flow_ip, nircm->tuple.flow_ip); 153 ECM_SFE_IPV6_ADDR_TO_IP_ADDR(return_ip, nircm->tuple.return_ip); 154 155 /* 156 * Record command duration 157 */ 158 ecm_sfe_ipv6_accel_done_time_update(feci); 159 160 /* 161 * Dump some useful trace information. 
162 */ 163 DEBUG_TRACE("%p: accelerate response for connection: %p, serial: %u\n", nnpci, feci->ci, serial); 164 DEBUG_TRACE("%p: rule_flags: %x, valid_flags: %x\n", nnpci, nircm->rule_flags, nircm->valid_flags); 165 DEBUG_TRACE("%p: flow_ip: " ECM_IP_ADDR_OCTAL_FMT ":%d\n", nnpci, ECM_IP_ADDR_TO_OCTAL(flow_ip), nircm->tuple.flow_ident); 166 DEBUG_TRACE("%p: return_ip: " ECM_IP_ADDR_OCTAL_FMT ":%d\n", nnpci, ECM_IP_ADDR_TO_OCTAL(return_ip), nircm->tuple.return_ident); 167 DEBUG_TRACE("%p: protocol: %d\n", nnpci, nircm->tuple.protocol); 168 169 /* 170 * Handle the creation result code. 171 */ 172 DEBUG_TRACE("%p: response: %d\n", nnpci, nim->cm.response); 173 if (nim->cm.response != SFE_CMN_RESPONSE_ACK) { 174 /* 175 * Creation command failed (specific reason ignored). 176 */ 177 DEBUG_TRACE("%p: accel nack: %d\n", nnpci, nim->cm.error); 178 spin_lock_bh(&feci->lock); 179 DEBUG_ASSERT(feci->accel_mode == ECM_FRONT_END_ACCELERATION_MODE_ACCEL_PENDING, "%p: Unexpected mode: %d\n", ci, feci->accel_mode); 180 feci->stats.ae_nack++; 181 feci->stats.ae_nack_total++; 182 if (feci->stats.ae_nack >= feci->stats.ae_nack_limit) { 183 /* 184 * Too many SFE rejections 185 */ 186 result_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_ACCEL_ENGINE; 187 } else { 188 /* 189 * Revert to decelerated 190 */ 191 result_mode = ECM_FRONT_END_ACCELERATION_MODE_DECEL; 192 } 193 194 /* 195 * TODO: Why is this differnt than IPv4? 
196 * Clear any decelerate pending flag since we aren't accelerated anyway we can just clear this whether it is set or not 197 */ 198 feci->stats.decelerate_pending = false; 199 200 /* 201 * If connection is now defunct then set mode to ensure no further accel attempts occur 202 */ 203 if (feci->is_defunct) { 204 result_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DEFUNCT; 205 } 206 207 spin_lock_bh(&ecm_sfe_ipv6_lock); 208 _ecm_sfe_ipv6_accel_pending_clear(feci, result_mode); 209 spin_unlock_bh(&ecm_sfe_ipv6_lock); 210 211 spin_unlock_bh(&feci->lock); 212 213 /* 214 * Release the connection. 215 */ 216 feci->deref(feci); 217 ecm_db_connection_deref(ci); 218 return; 219 } 220 221 spin_lock_bh(&feci->lock); 222 DEBUG_ASSERT(feci->accel_mode == ECM_FRONT_END_ACCELERATION_MODE_ACCEL_PENDING, "%p: Unexpected mode: %d\n", ci, feci->accel_mode); 223 224 /* 225 * If a flush occured before we got the ACK then our acceleration was effectively cancelled on us 226 * GGG TODO This is a workaround for a SFE message OOO quirk, this should eventually be removed. 227 */ 228 if (feci->stats.flush_happened) { 229 feci->stats.flush_happened = false; 230 231 /* 232 * Increment the no-action counter. Our connectin was decelerated on us with no action occurring. 233 */ 234 feci->stats.no_action_seen++; 235 236 spin_lock_bh(&ecm_sfe_ipv6_lock); 237 _ecm_sfe_ipv6_accel_pending_clear(feci, ECM_FRONT_END_ACCELERATION_MODE_DECEL); 238 spin_unlock_bh(&ecm_sfe_ipv6_lock); 239 240 spin_unlock_bh(&feci->lock); 241 242 /* 243 * Release the connection. 244 */ 245 feci->deref(feci); 246 ecm_db_connection_deref(ci); 247 return; 248 } 249 250 /* 251 * Create succeeded 252 */ 253 254 /* 255 * Clear any nack count 256 */ 257 feci->stats.ae_nack = 0; 258 259 /* 260 * Clear the "accelerate pending" state and move to "accelerated" state bumping 261 * the accelerated counters to match our new state. 
262 * 263 * Decelerate may have been attempted while we were "pending accel" and 264 * this function will return true if that was the case. 265 * If decelerate was pending then we need to begin deceleration :-( 266 */ 267 spin_lock_bh(&ecm_sfe_ipv6_lock); 268 269 ecm_sfe_non_ported_ipv6_accelerated_count++; /* Protocol specific counter */ 270 ecm_sfe_ipv6_accelerated_count++; /* General running counter */ 271 272 if (!_ecm_sfe_ipv6_accel_pending_clear(feci, ECM_FRONT_END_ACCELERATION_MODE_ACCEL)) { 273 /* 274 * Increment the no-action counter, this is reset if offload action is seen 275 */ 276 feci->stats.no_action_seen++; 277 278 spin_unlock_bh(&ecm_sfe_ipv6_lock); 279 spin_unlock_bh(&feci->lock); 280 281 /* 282 * Release the connection. 283 */ 284 feci->deref(feci); 285 ecm_db_connection_deref(ci); 286 return; 287 } 288 289 DEBUG_INFO("%p: Decelerate was pending\n", ci); 290 291 spin_unlock_bh(&ecm_sfe_ipv6_lock); 292 spin_unlock_bh(&feci->lock); 293 294 feci->decelerate(feci); 295 296 /* 297 * Release the connection. 298 */ 299 feci->deref(feci); 300 ecm_db_connection_deref(ci); 301} 302 303/* 304 * ecm_sfe_non_ported_ipv6_connection_accelerate() 305 * Accelerate a connection 306 * 307 * GGG TODO Refactor this function into a single function that np, udp and tcp 308 * can all use and reduce the amount of code! 
309 */ 310static void ecm_sfe_non_ported_ipv6_connection_accelerate(struct ecm_front_end_connection_instance *feci, 311 struct ecm_classifier_process_response *pr, bool is_l2_encap) 312{ 313 struct ecm_sfe_non_ported_ipv6_connection_instance *nnpci = (struct ecm_sfe_non_ported_ipv6_connection_instance *)feci; 314 uint16_t regen_occurrances; 315 int protocol; 316 int32_t from_ifaces_first; 317 int32_t to_ifaces_first; 318 struct ecm_db_iface_instance *from_ifaces[ECM_DB_IFACE_HEIRARCHY_MAX]; 319 struct ecm_db_iface_instance *to_ifaces[ECM_DB_IFACE_HEIRARCHY_MAX]; 320 struct ecm_db_iface_instance *from_sfe_iface; 321 struct ecm_db_iface_instance *to_sfe_iface; 322 int32_t from_sfe_iface_id; 323 int32_t to_sfe_iface_id; 324 uint8_t from_sfe_iface_address[ETH_ALEN]; 325 uint8_t to_sfe_iface_address[ETH_ALEN]; 326 struct sfe_ipv6_msg nim; 327 struct sfe_ipv6_rule_create_msg *nircm; 328 struct ecm_classifier_instance *assignments[ECM_CLASSIFIER_TYPES]; 329 int aci_index; 330 int assignment_count; 331 sfe_tx_status_t sfe_tx_status; 332 int32_t list_index; 333 int32_t interface_type_counts[ECM_DB_IFACE_TYPE_COUNT]; 334 bool rule_invalid; 335 ip_addr_t src_ip; 336 ip_addr_t dest_ip; 337 ecm_front_end_acceleration_mode_t result_mode; 338 339 DEBUG_CHECK_MAGIC(nnpci, ECM_SFE_NON_PORTED_IPV6_CONNECTION_INSTANCE_MAGIC, "%p: magic failed", nnpci); 340 341 /* 342 * Get the re-generation occurrance counter of the connection. 343 * We compare it again at the end - to ensure that the rule construction has seen no generation 344 * changes during rule creation. 
345 */ 346 regen_occurrances = ecm_db_connection_regeneration_occurrances_get(feci->ci); 347 348 /* 349 * For non-ported protocols we only support IPv6 in 4 or ESP 350 */ 351 protocol = ecm_db_connection_protocol_get(feci->ci); 352 if ((protocol != IPPROTO_IPIP) && (protocol != IPPROTO_ESP)) { 353 spin_lock_bh(&feci->lock); 354 feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_RULE; 355 spin_unlock_bh(&feci->lock); 356 DEBUG_TRACE("%p: unsupported protocol: %d\n", nnpci, protocol); 357 return; 358 } 359 360 /* 361 * Test if acceleration is permitted 362 */ 363 if (!ecm_sfe_ipv6_accel_pending_set(feci)) { 364 DEBUG_TRACE("%p: Acceleration not permitted: %p\n", feci, feci->ci); 365 return; 366 } 367 368 /* 369 * Okay construct an accel command. 370 * Initialise creation structure. 371 * NOTE: We leverage the app_data void pointer to be our 32 bit connection serial number. 372 * When we get it back we re-cast it to a uint32 and do a faster connection lookup. 373 */ 374 memset(&nim, 0, sizeof(struct sfe_ipv6_msg)); 375 sfe_ipv6_msg_init(&nim, SFE_SPECIAL_INTERFACE_IPV6, SFE_TX_CREATE_RULE_MSG, 376 sizeof(struct sfe_ipv6_rule_create_msg), 377 ecm_sfe_non_ported_ipv6_connection_callback, 378 (void *)ecm_db_connection_serial_get(feci->ci)); 379 380 nircm = &nim.msg.rule_create; 381 nircm->valid_flags = 0; 382 nircm->rule_flags = 0; 383 384 /* 385 * Initialize VLAN tag information 386 */ 387 nircm->vlan_primary_rule.ingress_vlan_tag = SFE_VLAN_ID_NOT_CONFIGURED; 388 nircm->vlan_primary_rule.egress_vlan_tag = SFE_VLAN_ID_NOT_CONFIGURED; 389 nircm->vlan_secondary_rule.ingress_vlan_tag = SFE_VLAN_ID_NOT_CONFIGURED; 390 nircm->vlan_secondary_rule.egress_vlan_tag = SFE_VLAN_ID_NOT_CONFIGURED; 391 392 /* 393 * Get the interface lists of the connection, we must have at least one interface in the list to continue 394 */ 395 from_ifaces_first = ecm_db_connection_from_interfaces_get_and_ref(feci->ci, from_ifaces); 396 if (from_ifaces_first == ECM_DB_IFACE_HEIRARCHY_MAX) { 397 
DEBUG_WARN("%p: Accel attempt failed - no interfaces in from_interfaces list!\n", nnpci); 398 goto non_ported_accel_bad_rule; 399 } 400 401 to_ifaces_first = ecm_db_connection_to_interfaces_get_and_ref(feci->ci, to_ifaces); 402 if (to_ifaces_first == ECM_DB_IFACE_HEIRARCHY_MAX) { 403 DEBUG_WARN("%p: Accel attempt failed - no interfaces in to_interfaces list!\n", nnpci); 404 ecm_db_connection_interfaces_deref(from_ifaces, from_ifaces_first); 405 goto non_ported_accel_bad_rule; 406 } 407 408 /* 409 * First interface in each must be a known sfe interface 410 */ 411 from_sfe_iface = from_ifaces[from_ifaces_first]; 412 to_sfe_iface = to_ifaces[to_ifaces_first]; 413 from_sfe_iface_id = ecm_db_iface_ae_interface_identifier_get(from_sfe_iface); 414 to_sfe_iface_id = ecm_db_iface_ae_interface_identifier_get(to_sfe_iface); 415 if ((from_sfe_iface_id < 0) || (to_sfe_iface_id < 0)) { 416 DEBUG_TRACE("%p: from_sfe_iface_id: %d, to_sfe_iface_id: %d\n", nnpci, from_sfe_iface_id, to_sfe_iface_id); 417 ecm_db_connection_interfaces_deref(from_ifaces, from_ifaces_first); 418 ecm_db_connection_interfaces_deref(to_ifaces, to_ifaces_first); 419 goto non_ported_accel_bad_rule; 420 } 421 422 /* 423 * New rule being created 424 */ 425 nircm->valid_flags |= SFE_RULE_CREATE_CONN_VALID; 426 427 /* 428 * Set interface numbers involved in accelerating this connection. 429 * These are the outer facing addresses from the heirarchy interface lists we got above. 430 * These may be overridden later if we detect special interface types e.g. ipsec. 431 */ 432 nircm->conn_rule.flow_interface_num = from_sfe_iface_id; 433 nircm->conn_rule.return_interface_num = to_sfe_iface_id; 434 435 /* 436 * Set interface numbers involved in accelerating this connection. 437 * These are the inner facing addresses from the heirarchy interface lists we got above. 
438 */ 439 nim.msg.rule_create.conn_rule.flow_top_interface_num = ecm_db_iface_interface_identifier_get(from_ifaces[ECM_DB_IFACE_HEIRARCHY_MAX-1]); 440 nim.msg.rule_create.conn_rule.return_top_interface_num = ecm_db_iface_interface_identifier_get(to_ifaces[ECM_DB_IFACE_HEIRARCHY_MAX-1]); 441 442 /* 443 * We know that each outward facing interface is known to the SFE and so this connection could be accelerated. 444 * However the lists may also specify other interesting details that must be included in the creation command, 445 * for example, ethernet MAC, VLAN tagging or PPPoE session information. 446 * We get this information by walking from the outer to the innermost interface for each list and examine the interface types. 447 * 448 * Start with the 'from' (src) side. 449 * NOTE: The lists may contain a complex heirarchy of similar type of interface e.g. multiple vlans or tunnels within tunnels. 450 * This SFE cannot handle that - there is no way to describe this in the rule - if we see multiple types that would conflict we have to abort. 451 */ 452 DEBUG_TRACE("%p: Examine from/src heirarchy list\n", nnpci); 453 memset(interface_type_counts, 0, sizeof(interface_type_counts)); 454 rule_invalid = false; 455 for (list_index = from_ifaces_first; !rule_invalid && (list_index < ECM_DB_IFACE_HEIRARCHY_MAX); list_index++) { 456 struct ecm_db_iface_instance *ii; 457 ecm_db_iface_type_t ii_type; 458 char *ii_name; 459 460 ii = from_ifaces[list_index]; 461 ii_type = ecm_db_connection_iface_type_get(ii); 462 ii_name = ecm_db_interface_type_to_string(ii_type); 463 DEBUG_TRACE("%p: list_index: %d, ii: %p, type: %d (%s)\n", nnpci, list_index, ii, ii_type, ii_name); 464 465 /* 466 * Extract information from this interface type if it is applicable to the rule. 467 * Conflicting information may cause accel to be unsupported. 
468 */ 469 switch (ii_type) { 470#ifdef ECM_INTERFACE_PPP_ENABLE 471 struct ecm_db_interface_info_pppoe pppoe_info; 472#endif 473#ifdef ECM_INTERFACE_VLAN_ENABLE 474 struct ecm_db_interface_info_vlan vlan_info; 475 uint32_t vlan_value = 0; 476 struct net_device *vlan_in_dev = NULL; 477#endif 478 479 case ECM_DB_IFACE_TYPE_BRIDGE: 480 DEBUG_TRACE("%p: Bridge\n", nnpci); 481 if (interface_type_counts[ii_type] != 0) { 482 /* 483 * Cannot cascade bridges 484 */ 485 rule_invalid = true; 486 DEBUG_TRACE("%p: Bridge - ignore additional\n", nnpci); 487 break; 488 } 489 ecm_db_iface_bridge_address_get(ii, from_sfe_iface_address); 490 DEBUG_TRACE("%p: Bridge - mac: %pM\n", nnpci, from_sfe_iface_address); 491 break; 492 case ECM_DB_IFACE_TYPE_ETHERNET: 493 DEBUG_TRACE("%p: Ethernet\n", nnpci); 494 if (interface_type_counts[ii_type] != 0) { 495 /* 496 * Ignore additional mac addresses, these are usually as a result of address propagation 497 * from bridges down to ports etc. 498 */ 499 DEBUG_TRACE("%p: Ethernet - ignore additional\n", nnpci); 500 break; 501 } 502 503 /* 504 * Can only handle one MAC, the first outermost mac. 505 */ 506 ecm_db_iface_ethernet_address_get(ii, from_sfe_iface_address); 507 DEBUG_TRACE("%p: Ethernet - mac: %pM\n", nnpci, from_sfe_iface_address); 508 break; 509 case ECM_DB_IFACE_TYPE_PPPOE: 510#ifdef ECM_INTERFACE_PPP_ENABLE 511 /* 512 * More than one PPPoE in the list is not valid! 513 */ 514 if (interface_type_counts[ii_type] != 0) { 515 DEBUG_TRACE("%p: PPPoE - additional unsupported\n", nnpci); 516 rule_invalid = true; 517 break; 518 } 519 520 /* 521 * Copy pppoe session info to the creation structure. 
522 */ 523 ecm_db_iface_pppoe_session_info_get(ii, &pppoe_info); 524 525 nircm->pppoe_rule.flow_pppoe_session_id = pppoe_info.pppoe_session_id; 526 memcpy(nircm->pppoe_rule.flow_pppoe_remote_mac, pppoe_info.remote_mac, ETH_ALEN); 527 nircm->valid_flags |= SFE_RULE_CREATE_PPPOE_VALID; 528 529 DEBUG_TRACE("%p: PPPoE - session: %x, mac: %pM\n", nnpci, 530 nircm->pppoe_rule.flow_pppoe_session_id, 531 nircm->pppoe_rule.flow_pppoe_remote_mac); 532#else 533 rule_invalid = true; 534#endif 535 break; 536 case ECM_DB_IFACE_TYPE_VLAN: 537#ifdef ECM_INTERFACE_VLAN_ENABLE 538 DEBUG_TRACE("%p: VLAN\n", nnpci); 539 if (interface_type_counts[ii_type] > 1) { 540 /* 541 * Can only support two vlans 542 */ 543 rule_invalid = true; 544 DEBUG_TRACE("%p: VLAN - additional unsupported\n", nnpci); 545 break; 546 } 547 ecm_db_iface_vlan_info_get(ii, &vlan_info); 548 vlan_value = ((vlan_info.vlan_tpid << 16) | vlan_info.vlan_tag); 549 550 /* 551 * Look up the vlan device and incorporate the vlan priority into the vlan_value 552 */ 553 vlan_in_dev = dev_get_by_index(&init_net, ecm_db_iface_interface_identifier_get(ii)); 554 if (vlan_in_dev) { 555 vlan_value |= vlan_dev_get_egress_prio(vlan_in_dev, pr->return_qos_tag); 556 dev_put(vlan_in_dev); 557 vlan_in_dev = NULL; 558 } 559 560 /* 561 * Primary or secondary (QinQ) VLAN? 
562 */ 563 if (interface_type_counts[ii_type] == 0) { 564 nircm->vlan_primary_rule.ingress_vlan_tag = vlan_value; 565 } else { 566 nircm->vlan_secondary_rule.ingress_vlan_tag = vlan_value; 567 } 568 nircm->valid_flags |= SFE_RULE_CREATE_VLAN_VALID; 569 570 /* 571 * If we have not yet got an ethernet mac then take this one (very unlikely as mac should have been propagated to the slave (outer) device 572 */ 573 if (interface_type_counts[ECM_DB_IFACE_TYPE_ETHERNET] == 0) { 574 memcpy(from_sfe_iface_address, vlan_info.address, ETH_ALEN); 575 interface_type_counts[ECM_DB_IFACE_TYPE_ETHERNET]++; 576 DEBUG_TRACE("%p: VLAN use mac: %pM\n", nnpci, from_sfe_iface_address); 577 } 578 DEBUG_TRACE("%p: vlan tag: %x\n", nnpci, vlan_value); 579#else 580 rule_invalid = true; 581 DEBUG_TRACE("%p: VLAN - unsupported\n", nnpci); 582#endif 583 break; 584 case ECM_DB_IFACE_TYPE_IPSEC_TUNNEL: 585#ifdef ECM_INTERFACE_IPSEC_ENABLE 586 DEBUG_TRACE("%p: IPSEC\n", nnpci); 587 if (interface_type_counts[ii_type] != 0) { 588 /* 589 * Can only support one ipsec 590 */ 591 rule_invalid = true; 592 DEBUG_TRACE("%p: IPSEC - additional unsupported\n", nnpci); 593 break; 594 } 595 nircm->conn_rule.flow_interface_num = SFE_SPECIAL_INTERFACE_IPSEC; 596#else 597 rule_invalid = true; 598 DEBUG_TRACE("%p: IPSEC - unsupported\n", nnpci); 599#endif 600 break; 601 default: 602 DEBUG_TRACE("%p: Ignoring: %d (%s)\n", nnpci, ii_type, ii_name); 603 } 604 605 /* 606 * Seen an interface of this type 607 */ 608 interface_type_counts[ii_type]++; 609 } 610 if (rule_invalid) { 611 DEBUG_WARN("%p: from/src Rule invalid\n", nnpci); 612 ecm_db_connection_interfaces_deref(from_ifaces, from_ifaces_first); 613 ecm_db_connection_interfaces_deref(to_ifaces, to_ifaces_first); 614 goto non_ported_accel_bad_rule; 615 } 616 617 /* 618 * Now examine the TO / DEST heirarchy list to construct the destination part of the rule 619 */ 620 DEBUG_TRACE("%p: Examine to/dest heirarchy list\n", nnpci); 621 memset(interface_type_counts, 0, 
sizeof(interface_type_counts)); 622 rule_invalid = false; 623 for (list_index = to_ifaces_first; !rule_invalid && (list_index < ECM_DB_IFACE_HEIRARCHY_MAX); list_index++) { 624 struct ecm_db_iface_instance *ii; 625 ecm_db_iface_type_t ii_type; 626 char *ii_name; 627 628 ii = to_ifaces[list_index]; 629 ii_type = ecm_db_connection_iface_type_get(ii); 630 ii_name = ecm_db_interface_type_to_string(ii_type); 631 DEBUG_TRACE("%p: list_index: %d, ii: %p, type: %d (%s)\n", nnpci, list_index, ii, ii_type, ii_name); 632 633 /* 634 * Extract information from this interface type if it is applicable to the rule. 635 * Conflicting information may cause accel to be unsupported. 636 */ 637 switch (ii_type) { 638#ifdef ECM_INTERFACE_PPP_ENABLE 639 struct ecm_db_interface_info_pppoe pppoe_info; 640#endif 641#ifdef ECM_INTERFACE_VLAN_ENABLE 642 struct ecm_db_interface_info_vlan vlan_info; 643 uint32_t vlan_value = 0; 644 struct net_device *vlan_out_dev = NULL; 645#endif 646 case ECM_DB_IFACE_TYPE_BRIDGE: 647 DEBUG_TRACE("%p: Bridge\n", nnpci); 648 if (interface_type_counts[ii_type] != 0) { 649 /* 650 * Cannot cascade bridges 651 */ 652 rule_invalid = true; 653 DEBUG_TRACE("%p: Bridge - ignore additional\n", nnpci); 654 break; 655 } 656 ecm_db_iface_bridge_address_get(ii, to_sfe_iface_address); 657 DEBUG_TRACE("%p: Bridge - mac: %pM\n", nnpci, to_sfe_iface_address); 658 break; 659 case ECM_DB_IFACE_TYPE_ETHERNET: 660 DEBUG_TRACE("%p: Ethernet\n", nnpci); 661 if (interface_type_counts[ii_type] != 0) { 662 /* 663 * Ignore additional mac addresses, these are usually as a result of address propagation 664 * from bridges down to ports etc. 665 */ 666 DEBUG_TRACE("%p: Ethernet - ignore additional\n", nnpci); 667 break; 668 } 669 670 /* 671 * Can only handle one MAC, the first outermost mac. 
672 */ 673 ecm_db_iface_ethernet_address_get(ii, to_sfe_iface_address); 674 DEBUG_TRACE("%p: Ethernet - mac: %pM\n", nnpci, to_sfe_iface_address); 675 break; 676 case ECM_DB_IFACE_TYPE_PPPOE: 677#ifdef ECM_INTERFACE_PPP_ENABLE 678 /* 679 * More than one PPPoE in the list is not valid! 680 */ 681 if (interface_type_counts[ii_type] != 0) { 682 DEBUG_TRACE("%p: PPPoE - additional unsupported\n", nnpci); 683 rule_invalid = true; 684 break; 685 } 686 687 /* 688 * Copy pppoe session info to the creation structure. 689 */ 690 ecm_db_iface_pppoe_session_info_get(ii, &pppoe_info); 691 nircm->pppoe_rule.return_pppoe_session_id = pppoe_info.pppoe_session_id; 692 memcpy(nircm->pppoe_rule.return_pppoe_remote_mac, pppoe_info.remote_mac, ETH_ALEN); 693 nircm->valid_flags |= SFE_RULE_CREATE_PPPOE_VALID; 694 695 DEBUG_TRACE("%p: PPPoE - session: %x, mac: %pM\n", nnpci, 696 nircm->pppoe_rule.return_pppoe_session_id, 697 nircm->pppoe_rule.return_pppoe_remote_mac); 698#else 699 rule_invalid = true; 700#endif 701 break; 702 case ECM_DB_IFACE_TYPE_VLAN: 703#ifdef ECM_INTERFACE_VLAN_ENABLE 704 DEBUG_TRACE("%p: VLAN\n", nnpci); 705 if (interface_type_counts[ii_type] > 1) { 706 /* 707 * Can only support two vlans 708 */ 709 rule_invalid = true; 710 DEBUG_TRACE("%p: VLAN - additional unsupported\n", nnpci); 711 break; 712 } 713 ecm_db_iface_vlan_info_get(ii, &vlan_info); 714 vlan_value = ((vlan_info.vlan_tpid << 16) | vlan_info.vlan_tag); 715 716 /* 717 * Look up the vlan device and incorporate the vlan priority into the vlan_value 718 */ 719 vlan_out_dev = dev_get_by_index(&init_net, ecm_db_iface_interface_identifier_get(ii)); 720 if (vlan_out_dev) { 721 vlan_value |= vlan_dev_get_egress_prio(vlan_out_dev, pr->flow_qos_tag); 722 dev_put(vlan_out_dev); 723 vlan_out_dev = NULL; 724 } 725 726 /* 727 * Primary or secondary (QinQ) VLAN? 
728 */ 729 if (interface_type_counts[ii_type] == 0) { 730 nircm->vlan_primary_rule.egress_vlan_tag = vlan_value; 731 } else { 732 nircm->vlan_secondary_rule.egress_vlan_tag = vlan_value; 733 } 734 nircm->valid_flags |= SFE_RULE_CREATE_VLAN_VALID; 735 736 /* 737 * If we have not yet got an ethernet mac then take this one (very unlikely as mac should have been propagated to the slave (outer) device 738 */ 739 if (interface_type_counts[ECM_DB_IFACE_TYPE_ETHERNET] == 0) { 740 memcpy(to_sfe_iface_address, vlan_info.address, ETH_ALEN); 741 interface_type_counts[ECM_DB_IFACE_TYPE_ETHERNET]++; 742 DEBUG_TRACE("%p: VLAN use mac: %pM\n", nnpci, to_sfe_iface_address); 743 } 744 DEBUG_TRACE("%p: vlan tag: %x\n", nnpci, vlan_value); 745#else 746 rule_invalid = true; 747 DEBUG_TRACE("%p: VLAN - unsupported\n", nnpci); 748#endif 749 break; 750 case ECM_DB_IFACE_TYPE_IPSEC_TUNNEL: 751#ifdef ECM_INTERFACE_IPSEC_ENABLE 752 DEBUG_TRACE("%p: IPSEC\n", nnpci); 753 if (interface_type_counts[ii_type] != 0) { 754 /* 755 * Can only support one ipsec 756 */ 757 rule_invalid = true; 758 DEBUG_TRACE("%p: IPSEC - additional unsupported\n", nnpci); 759 break; 760 } 761 nircm->conn_rule.return_interface_num = SFE_SPECIAL_INTERFACE_IPSEC; 762#else 763 rule_invalid = true; 764 DEBUG_TRACE("%p: IPSEC - unsupported\n", nnpci); 765#endif 766 break; 767 default: 768 DEBUG_TRACE("%p: Ignoring: %d (%s)\n", nnpci, ii_type, ii_name); 769 } 770 771 /* 772 * Seen an interface of this type 773 */ 774 interface_type_counts[ii_type]++; 775 } 776 if (rule_invalid) { 777 DEBUG_WARN("%p: to/dest Rule invalid\n", nnpci); 778 ecm_db_connection_interfaces_deref(from_ifaces, from_ifaces_first); 779 ecm_db_connection_interfaces_deref(to_ifaces, to_ifaces_first); 780 goto non_ported_accel_bad_rule; 781 } 782 783 /* 784 * Routed or bridged? 
785 */ 786 if (ecm_db_connection_is_routed_get(feci->ci)) { 787 nircm->rule_flags |= SFE_RULE_CREATE_FLAG_ROUTED; 788 } else { 789 nircm->rule_flags |= SFE_RULE_CREATE_FLAG_BRIDGE_FLOW; 790 if (is_l2_encap) { 791 nircm->rule_flags |= SFE_RULE_CREATE_FLAG_L2_ENCAP; 792 } 793 } 794 795 /* 796 * Set up the flow and return qos tags 797 */ 798 nircm->qos_rule.flow_qos_tag = (uint32_t)pr->flow_qos_tag; 799 nircm->qos_rule.return_qos_tag = (uint32_t)pr->return_qos_tag; 800 nircm->valid_flags |= SFE_RULE_CREATE_QOS_VALID; 801 802#ifdef ECM_CLASSIFIER_DSCP_ENABLE 803 /* 804 * DSCP information? 805 */ 806 if (pr->process_actions & ECM_CLASSIFIER_PROCESS_ACTION_DSCP) { 807 nircm->dscp_rule.flow_dscp = pr->flow_dscp; 808 nircm->dscp_rule.return_dscp = pr->return_dscp; 809 nircm->rule_flags |= SFE_RULE_CREATE_FLAG_DSCP_MARKING; 810 nircm->valid_flags |= SFE_RULE_CREATE_DSCP_MARKING_VALID; 811 } 812#endif 813 /* 814 * Set protocol 815 */ 816 nircm->tuple.protocol = (int32_t)protocol; 817 818 /* 819 * The flow_ip is where the connection established from 820 */ 821 ecm_db_connection_from_address_get(feci->ci, src_ip); 822 ECM_IP_ADDR_TO_SFE_IPV6_ADDR(nircm->tuple.flow_ip, src_ip); 823 824 /* 825 * The return_ip is where the connection is established to 826 */ 827 ecm_db_connection_to_address_get(feci->ci, dest_ip); 828 ECM_IP_ADDR_TO_SFE_IPV6_ADDR(nircm->tuple.return_ip, dest_ip); 829 830 /* 831 * Same approach as above for port information 832 */ 833 nircm->tuple.flow_ident = htons(ecm_db_connection_from_port_get(feci->ci)); 834 nircm->tuple.return_ident = htons(ecm_db_connection_to_port_nat_get(feci->ci)); 835 836 /* 837 * Get mac addresses. 838 * The src_mac is the mac address of the node that established the connection. 839 * This will work whether the from_node is LAN (egress) or WAN (ingress). 
840 */ 841 ecm_db_connection_from_node_address_get(feci->ci, (uint8_t *)nircm->conn_rule.flow_mac); 842 843 /* 844 * The dest_mac is the mac address of the node that the connection is esatblished to. 845 */ 846 ecm_db_connection_to_nat_node_address_get(feci->ci, (uint8_t *)nircm->conn_rule.return_mac); 847 848 /* 849 * Get MTU information 850 */ 851 nircm->conn_rule.flow_mtu = (uint32_t)ecm_db_connection_from_iface_mtu_get(feci->ci); 852 nircm->conn_rule.return_mtu = (uint32_t)ecm_db_connection_to_iface_mtu_get(feci->ci); 853 854 /* 855 * Sync our creation command from the assigned classifiers to get specific additional creation rules. 856 * NOTE: These are called in ascending order of priority and so the last classifier (highest) shall 857 * override any preceding classifiers. 858 * This also gives the classifiers a chance to see that acceleration is being attempted. 859 */ 860 assignment_count = ecm_db_connection_classifier_assignments_get_and_ref(feci->ci, assignments); 861 for (aci_index = 0; aci_index < assignment_count; ++aci_index) { 862 struct ecm_classifier_instance *aci; 863 struct ecm_classifier_rule_create ecrc; 864 /* 865 * NOTE: The current classifiers do not sync anything to the underlying accel engines. 866 * In the future, if any of the classifiers wants to pass any parameter, these parameters 867 * should be received via this object and copied to the accel engine's create object (nircm). 
868 */ 869 aci = assignments[aci_index]; 870 DEBUG_TRACE("%p: sync from: %p, type: %d\n", nnpci, aci, aci->type_get(aci)); 871 aci->sync_from_v6(aci, &ecrc); 872 } 873 ecm_db_connection_assignments_release(assignment_count, assignments); 874 875 /* 876 * Release the interface lists 877 */ 878 ecm_db_connection_interfaces_deref(from_ifaces, from_ifaces_first); 879 ecm_db_connection_interfaces_deref(to_ifaces, to_ifaces_first); 880 881 DEBUG_INFO("%p: NON_PORTED Accelerate connection %p\n" 882 "Protocol: %d\n" 883 "from_mtu: %u\n" 884 "to_mtu: %u\n" 885 "from_ip: " ECM_IP_ADDR_OCTAL_FMT ":%d\n" 886 "to_ip: " ECM_IP_ADDR_OCTAL_FMT ":%d\n" 887 "from_mac: %pM\n" 888 "to_mac: %pM\n" 889 "src_iface_num: %u\n" 890 "dest_iface_num: %u\n" 891 "ingress_inner_vlan_tag: %u\n" 892 "egress_inner_vlan_tag: %u\n" 893 "ingress_outer_vlan_tag: %u\n" 894 "egress_outer_vlan_tag: %u\n" 895 "rule_flags: %x\n" 896 "valid_flags: %x\n" 897 "return_pppoe_session_id: %u\n" 898 "return_pppoe_remote_mac: %pM\n" 899 "flow_pppoe_session_id: %u\n" 900 "flow_pppoe_remote_mac: %pM\n" 901 "flow_qos_tag: %x (%u)\n" 902 "return_qos_tag: %x (%u)\n" 903 "flow_dscp: %x\n" 904 "return_dscp: %x\n", 905 nnpci, 906 feci->ci, 907 nircm->tuple.protocol, 908 nircm->conn_rule.flow_mtu, 909 nircm->conn_rule.return_mtu, 910 ECM_IP_ADDR_TO_OCTAL(src_ip), nircm->tuple.flow_ident, 911 ECM_IP_ADDR_TO_OCTAL(dest_ip), nircm->tuple.return_ident, 912 nircm->conn_rule.flow_mac, 913 nircm->conn_rule.return_mac, 914 nircm->conn_rule.flow_interface_num, 915 nircm->conn_rule.return_interface_num, 916 nircm->vlan_primary_rule.ingress_vlan_tag, 917 nircm->vlan_primary_rule.egress_vlan_tag, 918 nircm->vlan_secondary_rule.ingress_vlan_tag, 919 nircm->vlan_secondary_rule.egress_vlan_tag, 920 nircm->rule_flags, 921 nircm->valid_flags, 922 nircm->pppoe_rule.return_pppoe_session_id, 923 nircm->pppoe_rule.return_pppoe_remote_mac, 924 nircm->pppoe_rule.flow_pppoe_session_id, 925 nircm->pppoe_rule.flow_pppoe_remote_mac, 926 
nircm->qos_rule.flow_qos_tag, nircm->qos_rule.flow_qos_tag, 927 nircm->qos_rule.return_qos_tag, nircm->qos_rule.return_qos_tag, 928 nircm->dscp_rule.flow_dscp, 929 nircm->dscp_rule.return_dscp); 930 931 /* 932 * Now that the rule has been constructed we re-compare the generation occurrance counter. 933 * If there has been a change then we abort because the rule may have been created using 934 * unstable data - especially if another thread has begun regeneration of the connection state. 935 * NOTE: This does not prevent a regen from being flagged immediately after this line of code either, 936 * or while the acceleration rule is in flight to the nss. 937 * This is only to check for consistency of rule state - not that the state is stale. 938 * Remember that the connection is marked as "accel pending state" so if a regen is flagged immediately 939 * after this check passes, the connection will be decelerated and refreshed very quickly. 940 */ 941 if (regen_occurrances != ecm_db_connection_regeneration_occurrances_get(feci->ci)) { 942 DEBUG_INFO("%p: connection:%p regen occurred - aborting accel rule.\n", feci, feci->ci); 943 ecm_sfe_ipv6_accel_pending_clear(feci, ECM_FRONT_END_ACCELERATION_MODE_DECEL); 944 return; 945 } 946 947 /* 948 * Ref the connection before issuing an SFE rule 949 * This ensures that when the SFE responds to the command - which may even be immediately - 950 * the callback function can trust the correct ref was taken for its purpose. 951 * NOTE: remember that this will also implicitly hold the feci. 
952 */ 953 ecm_db_connection_ref(feci->ci); 954 955 /* 956 * We are about to issue the command, record the time of transmission 957 */ 958 spin_lock_bh(&feci->lock); 959 feci->stats.cmd_time_begun = jiffies; 960 spin_unlock_bh(&feci->lock); 961 962 /* 963 * Call the rule create function 964 */ 965 sfe_tx_status = sfe_drv_ipv6_tx(ecm_sfe_ipv6_drv_mgr, &nim); 966 if (sfe_tx_status == SFE_TX_SUCCESS) { 967 /* 968 * Reset the driver_fail count - transmission was okay here. 969 */ 970 spin_lock_bh(&feci->lock); 971 feci->stats.driver_fail = 0; 972 spin_unlock_bh(&feci->lock); 973 return; 974 } 975 976 /* 977 * Release that ref! 978 */ 979 ecm_db_connection_deref(feci->ci); 980 981 /* 982 * TX failed 983 */ 984 spin_lock_bh(&feci->lock); 985 DEBUG_ASSERT(feci->accel_mode == ECM_FRONT_END_ACCELERATION_MODE_ACCEL_PENDING, "%p: Accel mode unexpected: %d\n", nnpci, feci->accel_mode); 986 feci->stats.driver_fail_total++; 987 feci->stats.driver_fail++; 988 if (feci->stats.driver_fail >= feci->stats.driver_fail_limit) { 989 DEBUG_WARN("%p: Accel failed - driver fail limit\n", nnpci); 990 result_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DRIVER; 991 } else { 992 result_mode = ECM_FRONT_END_ACCELERATION_MODE_DECEL; 993 } 994 995 spin_lock_bh(&ecm_sfe_ipv6_lock); 996 _ecm_sfe_ipv6_accel_pending_clear(feci, result_mode); 997 spin_unlock_bh(&ecm_sfe_ipv6_lock); 998 999 spin_unlock_bh(&feci->lock); 1000 return; 1001 1002non_ported_accel_bad_rule: 1003 ; 1004 1005 /* 1006 * Jump to here when rule data is bad and an offload command cannot be constructed 1007 */ 1008 DEBUG_WARN("%p: Accel failed - bad rule\n", nnpci); 1009 ecm_sfe_ipv6_accel_pending_clear(feci, ECM_FRONT_END_ACCELERATION_MODE_FAIL_RULE); 1010} 1011 1012/* 1013 * ecm_sfe_non_ported_ipv6_connection_destroy_callback() 1014 * Callback for handling destroy ack/nack calls. 
1015 */ 1016static void ecm_sfe_non_ported_ipv6_connection_destroy_callback(void *app_data, struct sfe_ipv6_msg *nim) 1017{ 1018 struct sfe_ipv6_rule_destroy_msg *nirdm = &nim->msg.rule_destroy; 1019 uint32_t serial = (uint32_t)app_data; 1020 struct ecm_db_connection_instance *ci; 1021 struct ecm_front_end_connection_instance *feci; 1022 struct ecm_sfe_non_ported_ipv6_connection_instance *nnpci; 1023 ip_addr_t flow_ip; 1024 ip_addr_t return_ip; 1025 1026 /* 1027 * Is this a response to a destroy message? 1028 */ 1029 if (nim->cm.type != SFE_TX_DESTROY_RULE_MSG) { 1030 DEBUG_ERROR("%p: non_ported destroy callback with improper type: %d\n", nim, nim->cm.type); 1031 return; 1032 } 1033 1034 /* 1035 * Look up ecm connection so that we can update the status. 1036 */ 1037 ci = ecm_db_connection_serial_find_and_ref(serial); 1038 if (!ci) { 1039 DEBUG_TRACE("%p: destroy callback, connection not found, serial: %u\n", nim, serial); 1040 return; 1041 } 1042 1043 /* 1044 * Release ref held for this ack/nack response. 1045 * NOTE: It's okay to do this here, ci won't go away, because the ci is held as 1046 * a result of the ecm_db_connection_serial_find_and_ref() 1047 */ 1048 ecm_db_connection_deref(ci); 1049 1050 /* 1051 * Get the front end instance 1052 */ 1053 feci = ecm_db_connection_front_end_get_and_ref(ci); 1054 nnpci = (struct ecm_sfe_non_ported_ipv6_connection_instance *)feci; 1055 DEBUG_CHECK_MAGIC(nnpci, ECM_SFE_NON_PORTED_IPV6_CONNECTION_INSTANCE_MAGIC, "%p: magic failed", nnpci); 1056 1057 ECM_SFE_IPV6_ADDR_TO_IP_ADDR(flow_ip, nirdm->tuple.flow_ip); 1058 ECM_SFE_IPV6_ADDR_TO_IP_ADDR(return_ip, nirdm->tuple.return_ip); 1059 1060 /* 1061 * Record command duration 1062 */ 1063 ecm_sfe_ipv6_decel_done_time_update(feci); 1064 1065 /* 1066 * Dump some useful trace information. 
1067 */ 1068 DEBUG_TRACE("%p: decelerate response for connection: %p\n", nnpci, feci->ci); 1069 DEBUG_TRACE("%p: flow_ip: " ECM_IP_ADDR_OCTAL_FMT ":%d\n", nnpci, ECM_IP_ADDR_TO_OCTAL(flow_ip), nirdm->tuple.flow_ident); 1070 DEBUG_TRACE("%p: return_ip: " ECM_IP_ADDR_OCTAL_FMT ":%d\n", nnpci, ECM_IP_ADDR_TO_OCTAL(return_ip), nirdm->tuple.return_ident); 1071 DEBUG_TRACE("%p: protocol: %d\n", nnpci, nirdm->tuple.protocol); 1072 1073 /* 1074 * Drop decel pending counter 1075 */ 1076 spin_lock_bh(&ecm_sfe_ipv6_lock); 1077 ecm_sfe_ipv6_pending_decel_count--; 1078 DEBUG_ASSERT(ecm_sfe_ipv6_pending_decel_count >= 0, "Bad decel pending counter\n"); 1079 spin_unlock_bh(&ecm_sfe_ipv6_lock); 1080 1081 spin_lock_bh(&feci->lock); 1082 1083 /* 1084 * If decel is not still pending then it's possible that the SFE ended acceleration by some other reason e.g. flush 1085 * In which case we cannot rely on the response we get here. 1086 */ 1087 if (feci->accel_mode != ECM_FRONT_END_ACCELERATION_MODE_DECEL_PENDING) { 1088 spin_unlock_bh(&feci->lock); 1089 1090 /* 1091 * Release the connections. 1092 */ 1093 feci->deref(feci); 1094 ecm_db_connection_deref(ci); 1095 return; 1096 } 1097 1098 DEBUG_TRACE("%p: response: %d\n", nnpci, nim->cm.response); 1099 if (nim->cm.response != SFE_CMN_RESPONSE_ACK) { 1100 feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DECEL; 1101 } else { 1102 feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_DECEL; 1103 } 1104 1105 /* 1106 * If connection became defunct then set mode so that no further accel/decel attempts occur. 
1107 */ 1108 if (feci->is_defunct) { 1109 feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DEFUNCT; 1110 } 1111 spin_unlock_bh(&feci->lock); 1112 1113 /* 1114 * NON_PORTED acceleration ends 1115 */ 1116 spin_lock_bh(&ecm_sfe_ipv6_lock); 1117 ecm_sfe_non_ported_ipv6_accelerated_count--; /* Protocol specific counter */ 1118 DEBUG_ASSERT(ecm_sfe_non_ported_ipv6_accelerated_count >= 0, "Bad non_ported accel counter\n"); 1119 ecm_sfe_ipv6_accelerated_count--; /* General running counter */ 1120 DEBUG_ASSERT(ecm_sfe_ipv6_accelerated_count >= 0, "Bad accel counter\n"); 1121 spin_unlock_bh(&ecm_sfe_ipv6_lock); 1122 1123 /* 1124 * Release the connections. 1125 */ 1126 feci->deref(feci); 1127 ecm_db_connection_deref(ci); 1128} 1129 1130/* 1131 * ecm_sfe_non_ported_ipv6_connection_decelerate() 1132 * Decelerate a connection 1133 */ 1134static void ecm_sfe_non_ported_ipv6_connection_decelerate(struct ecm_front_end_connection_instance *feci) 1135{ 1136 struct ecm_sfe_non_ported_ipv6_connection_instance *nnpci = (struct ecm_sfe_non_ported_ipv6_connection_instance *)feci; 1137 struct sfe_ipv6_msg nim; 1138 struct sfe_ipv6_rule_destroy_msg *nirdm; 1139 ip_addr_t src_ip; 1140 ip_addr_t dest_ip; 1141 sfe_tx_status_t sfe_tx_status; 1142 int protocol; 1143 1144 DEBUG_CHECK_MAGIC(nnpci, ECM_SFE_NON_PORTED_IPV6_CONNECTION_INSTANCE_MAGIC, "%p: magic failed", nnpci); 1145 1146 /* 1147 * For non-ported protocols we only support IPIP. 
1148 */ 1149 protocol = ecm_db_connection_protocol_get(feci->ci); 1150 if ((protocol != IPPROTO_IPIP)) { 1151 DEBUG_TRACE("%p: unsupported protocol: %d\n", nnpci, protocol); 1152 return; 1153 } 1154 1155 /* 1156 * If decelerate is in error or already pending then ignore 1157 */ 1158 spin_lock_bh(&feci->lock); 1159 if (feci->stats.decelerate_pending) { 1160 spin_unlock_bh(&feci->lock); 1161 return; 1162 } 1163 1164 /* 1165 * If acceleration is pending then we cannot decelerate right now or we will race with it 1166 * Set a decelerate pending flag that will be actioned when the acceleration command is complete. 1167 */ 1168 if (feci->accel_mode == ECM_FRONT_END_ACCELERATION_MODE_ACCEL_PENDING) { 1169 feci->stats.decelerate_pending = true; 1170 spin_unlock_bh(&feci->lock); 1171 return; 1172 } 1173 1174 /* 1175 * Can only decelerate if accelerated 1176 * NOTE: This will also deny accel when the connection is in fail condition too. 1177 */ 1178 if (feci->accel_mode != ECM_FRONT_END_ACCELERATION_MODE_ACCEL) { 1179 spin_unlock_bh(&feci->lock); 1180 return; 1181 } 1182 1183 /* 1184 * Initiate deceleration 1185 */ 1186 feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_DECEL_PENDING; 1187 spin_unlock_bh(&feci->lock); 1188 1189 /* 1190 * Increment the decel pending counter 1191 */ 1192 spin_lock_bh(&ecm_sfe_ipv6_lock); 1193 ecm_sfe_ipv6_pending_decel_count++; 1194 spin_unlock_bh(&ecm_sfe_ipv6_lock); 1195 1196 /* 1197 * Prepare deceleration message 1198 */ 1199 sfe_ipv6_msg_init(&nim, SFE_SPECIAL_INTERFACE_IPV6, SFE_TX_DESTROY_RULE_MSG, 1200 sizeof(struct sfe_ipv6_rule_destroy_msg), 1201 ecm_sfe_non_ported_ipv6_connection_destroy_callback, 1202 (void *)ecm_db_connection_serial_get(feci->ci)); 1203 1204 nirdm = &nim.msg.rule_destroy; 1205 nirdm->tuple.protocol = (int32_t)protocol; 1206 1207 /* 1208 * Get addressing information 1209 */ 1210 ecm_db_connection_from_address_get(feci->ci, src_ip); 1211 ECM_IP_ADDR_TO_SFE_IPV6_ADDR(nirdm->tuple.flow_ip, src_ip); 1212 
ecm_db_connection_to_address_nat_get(feci->ci, dest_ip); 1213 ECM_IP_ADDR_TO_SFE_IPV6_ADDR(nirdm->tuple.return_ip, dest_ip); 1214 nirdm->tuple.flow_ident = htons(ecm_db_connection_from_port_get(feci->ci)); 1215 nirdm->tuple.return_ident = htons(ecm_db_connection_to_port_nat_get(feci->ci)); 1216 1217 DEBUG_INFO("%p: NON_PORTED Connection %p decelerate\n" 1218 "src_ip: " ECM_IP_ADDR_OCTAL_FMT ":%d\n" 1219 "dest_ip: " ECM_IP_ADDR_OCTAL_FMT ":%d\n", 1220 nnpci, feci->ci, 1221 ECM_IP_ADDR_TO_OCTAL(src_ip), nirdm->tuple.flow_ident, 1222 ECM_IP_ADDR_TO_OCTAL(dest_ip), nirdm->tuple.return_ident); 1223 1224 /* 1225 * Take a ref to the feci->ci so that it will persist until we get a response from the SFE. 1226 * NOTE: This will implicitly hold the feci too. 1227 */ 1228 ecm_db_connection_ref(feci->ci); 1229 1230 /* 1231 * We are about to issue the command, record the time of transmission 1232 */ 1233 spin_lock_bh(&feci->lock); 1234 feci->stats.cmd_time_begun = jiffies; 1235 spin_unlock_bh(&feci->lock); 1236 1237 /* 1238 * Destroy the SFE connection cache entry. 1239 */ 1240 sfe_tx_status = sfe_drv_ipv6_tx(ecm_sfe_ipv6_drv_mgr, &nim); 1241 if (sfe_tx_status == SFE_TX_SUCCESS) { 1242 /* 1243 * Reset the driver_fail count - transmission was okay here. 1244 */ 1245 spin_lock_bh(&feci->lock); 1246 feci->stats.driver_fail = 0; 1247 spin_unlock_bh(&feci->lock); 1248 return; 1249 } 1250 1251 /* 1252 * Release the ref take, SFE driver did not accept our command. 
1253 */ 1254 ecm_db_connection_deref(feci->ci); 1255 1256 /* 1257 * TX failed 1258 */ 1259 spin_lock_bh(&feci->lock); 1260 feci->stats.driver_fail_total++; 1261 feci->stats.driver_fail++; 1262 if (feci->stats.driver_fail >= feci->stats.driver_fail_limit) { 1263 DEBUG_WARN("%p: Decel failed - driver fail limit\n", nnpci); 1264 feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DRIVER; 1265 } 1266 spin_unlock_bh(&feci->lock); 1267 1268 /* 1269 * Could not send the request, decrement the decel pending counter 1270 */ 1271 spin_lock_bh(&ecm_sfe_ipv6_lock); 1272 ecm_sfe_ipv6_pending_decel_count--; 1273 DEBUG_ASSERT(ecm_sfe_ipv6_pending_decel_count >= 0, "Bad decel pending counter\n"); 1274 spin_unlock_bh(&ecm_sfe_ipv6_lock); 1275 1276} 1277 1278/* 1279 * ecm_sfe_non_ported_ipv6_connection_defunct_callback() 1280 * Callback to be called when a non-ported connection has become defunct. 1281 */ 1282static void ecm_sfe_non_ported_ipv6_connection_defunct_callback(void *arg) 1283{ 1284 struct ecm_front_end_connection_instance *feci = (struct ecm_front_end_connection_instance *)arg; 1285 struct ecm_sfe_non_ported_ipv6_connection_instance *nnpci = (struct ecm_sfe_non_ported_ipv6_connection_instance *)feci; 1286 1287 DEBUG_CHECK_MAGIC(nnpci, ECM_SFE_NON_PORTED_IPV6_CONNECTION_INSTANCE_MAGIC, "%p: magic failed", nnpci); 1288 1289 spin_lock_bh(&feci->lock); 1290 1291 /* 1292 * If connection has already become defunct, do nothing. 1293 */ 1294 if (feci->is_defunct) { 1295 spin_unlock_bh(&feci->lock); 1296 return; 1297 } 1298 feci->is_defunct = true; 1299 1300 /* 1301 * If the connection is already in one of the fail modes, do nothing, keep the current accel_mode. 1302 */ 1303 if (ECM_FRONT_END_ACCELERATION_FAILED(feci->accel_mode)) { 1304 spin_unlock_bh(&feci->lock); 1305 return; 1306 } 1307 1308 /* 1309 * If the connection is decel then ensure it will not attempt accel while defunct. 
 */
	/*
	 * NOTE: feci->lock is held at this point (taken at the top of this
	 * function); every branch below releases it before returning.
	 */
	if (feci->accel_mode == ECM_FRONT_END_ACCELERATION_MODE_DECEL) {
		feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DEFUNCT;
		spin_unlock_bh(&feci->lock);
		return;
	}

	/*
	 * If the connection is decel pending then decel operation is in progress anyway.
	 */
	if (feci->accel_mode == ECM_FRONT_END_ACCELERATION_MODE_DECEL_PENDING) {
		spin_unlock_bh(&feci->lock);
		return;
	}

	/*
	 * If none of the cases matched above, this means the connection is in one of the
	 * accel modes (accel or accel_pending) so we force a deceleration.
	 * NOTE: If the mode is accel pending then the decel will be actioned when that is completed.
	 * The lock must be dropped first because decelerate() takes it again itself.
	 */
	spin_unlock_bh(&feci->lock);
	ecm_sfe_non_ported_ipv6_connection_decelerate(feci);
}

/*
 * ecm_sfe_non_ported_ipv6_connection_accel_state_get()
 *	Get acceleration state
 *
 * Returns a snapshot of feci->accel_mode taken under feci->lock; the value
 * may be stale by the time the caller inspects it.
 */
static ecm_front_end_acceleration_mode_t ecm_sfe_non_ported_ipv6_connection_accel_state_get(struct ecm_front_end_connection_instance *feci)
{
	struct ecm_sfe_non_ported_ipv6_connection_instance *nnpci = (struct ecm_sfe_non_ported_ipv6_connection_instance *)feci;
	ecm_front_end_acceleration_mode_t state;

	DEBUG_CHECK_MAGIC(nnpci, ECM_SFE_NON_PORTED_IPV6_CONNECTION_INSTANCE_MAGIC, "%p: magic failed", nnpci);
	spin_lock_bh(&feci->lock);
	state = feci->accel_mode;
	spin_unlock_bh(&feci->lock);
	return state;
}

/*
 * ecm_sfe_non_ported_ipv6_connection_action_seen()
 *	Acceleration action / activity has been seen for this connection.
 *
 * NOTE: Call the action_seen() method when the SFE has demonstrated that it has offloaded some data for a connection.
 */
static void ecm_sfe_non_ported_ipv6_connection_action_seen(struct ecm_front_end_connection_instance *feci)
{
	struct ecm_sfe_non_ported_ipv6_connection_instance *nnpci = (struct ecm_sfe_non_ported_ipv6_connection_instance *)feci;

	DEBUG_CHECK_MAGIC(nnpci, ECM_SFE_NON_PORTED_IPV6_CONNECTION_INSTANCE_MAGIC, "%p: magic failed", nnpci);

	DEBUG_INFO("%p: Action seen\n", nnpci);
	/*
	 * Clearing no_action_seen stops accel_ceased() from counting this
	 * acceleration period as one that produced no offload activity.
	 */
	spin_lock_bh(&feci->lock);
	feci->stats.no_action_seen = 0;
	spin_unlock_bh(&feci->lock);
}

/*
 * ecm_sfe_non_ported_ipv6_connection_accel_ceased()
 *	SFE has indicated that acceleration has stopped.
 *
 * NOTE: This is called in response to an SFE self-initiated termination of acceleration.
 * This must NOT be called because the ECM terminated the acceleration.
 */
static void ecm_sfe_non_ported_ipv6_connection_accel_ceased(struct ecm_front_end_connection_instance *feci)
{
	struct ecm_sfe_non_ported_ipv6_connection_instance *nnpci = (struct ecm_sfe_non_ported_ipv6_connection_instance *)feci;

	DEBUG_CHECK_MAGIC(nnpci, ECM_SFE_NON_PORTED_IPV6_CONNECTION_INSTANCE_MAGIC, "%p: magic failed", nnpci);

	DEBUG_INFO("%p: accel ceased\n", nnpci);

	spin_lock_bh(&feci->lock);

	/*
	 * If we are in accel-pending state then the SFE has issued a flush out-of-order
	 * with the ACK/NACK we are actually waiting for.
	 * To work around this we record a "flush has already happened" and will action it when we finally get that ACK/NACK.
	 * GGG TODO This should eventually be removed when the SFE honours messaging sequence.
	 */
	if (feci->accel_mode == ECM_FRONT_END_ACCELERATION_MODE_ACCEL_PENDING) {
		feci->stats.flush_happened = true;
		feci->stats.flush_happened_total++;
		spin_unlock_bh(&feci->lock);
		return;
	}

	/*
	 * If connection is no longer accelerated by the time we get here just ignore the command
	 */
	if (feci->accel_mode != ECM_FRONT_END_ACCELERATION_MODE_ACCEL) {
		spin_unlock_bh(&feci->lock);
		return;
	}

	/*
	 * If the no_action_seen counter was not reset then acceleration ended without any offload action
	 */
	if (feci->stats.no_action_seen) {
		feci->stats.no_action_seen_total++;
	}

	/*
	 * If the no_action_seen indicates successive cessations of acceleration without any offload action occuring
	 * then we fail out this connection
	 */
	if (feci->stats.no_action_seen >= feci->stats.no_action_seen_limit) {
		feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_NO_ACTION;
	} else {
		feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_DECEL;
	}
	spin_unlock_bh(&feci->lock);

	/*
	 * Non-Ported acceleration ends
	 * Global counters are protected by ecm_sfe_ipv6_lock, not feci->lock.
	 */
	spin_lock_bh(&ecm_sfe_ipv6_lock);
	ecm_sfe_non_ported_ipv6_accelerated_count--;	/* Protocol specific counter */
	DEBUG_ASSERT(ecm_sfe_non_ported_ipv6_accelerated_count >= 0, "Bad non-ported accel counter\n");
	ecm_sfe_ipv6_accelerated_count--;	/* General running counter */
	DEBUG_ASSERT(ecm_sfe_ipv6_accelerated_count >= 0, "Bad accel counter\n");
	spin_unlock_bh(&ecm_sfe_ipv6_lock);
}

/*
 * ecm_sfe_non_ported_ipv6_connection_ref()
 *	Ref a connection front end instance
 */
static void ecm_sfe_non_ported_ipv6_connection_ref(struct ecm_front_end_connection_instance *feci)
{
	struct ecm_sfe_non_ported_ipv6_connection_instance *nnpci = (struct ecm_sfe_non_ported_ipv6_connection_instance *)feci;

	DEBUG_CHECK_MAGIC(nnpci, ECM_SFE_NON_PORTED_IPV6_CONNECTION_INSTANCE_MAGIC, "%p: magic failed", nnpci);
	spin_lock_bh(&feci->lock);
	feci->refs++;
	DEBUG_TRACE("%p: nnpci ref %d\n", nnpci, feci->refs);
	DEBUG_ASSERT(feci->refs > 0, "%p: ref wrap\n", nnpci);
	spin_unlock_bh(&feci->lock);
}

/*
 * ecm_sfe_non_ported_ipv6_connection_deref()
 *	Deref a connection front end instance
 *
 * Returns the remaining reference count, or 0 when this call released the
 * final reference and freed the instance (in which case feci is no longer
 * valid on return).
 */
static int ecm_sfe_non_ported_ipv6_connection_deref(struct ecm_front_end_connection_instance *feci)
{
	struct ecm_sfe_non_ported_ipv6_connection_instance *nnpci = (struct ecm_sfe_non_ported_ipv6_connection_instance *)feci;

	DEBUG_CHECK_MAGIC(nnpci, ECM_SFE_NON_PORTED_IPV6_CONNECTION_INSTANCE_MAGIC, "%p: magic failed", nnpci);

	spin_lock_bh(&feci->lock);
	feci->refs--;
	DEBUG_ASSERT(feci->refs >= 0, "%p: ref wrap\n", nnpci);

	if (feci->refs > 0) {
		/* Snapshot the count before dropping the lock so the trace is coherent */
		int refs = feci->refs;
		spin_unlock_bh(&feci->lock);
		DEBUG_TRACE("%p: nnpci deref %d\n", nnpci, refs);
		return refs;
	}
	spin_unlock_bh(&feci->lock);

	/*
	 * We can now destroy the instance
	 */
	DEBUG_TRACE("%p: nnpci final\n", nnpci);
	DEBUG_CLEAR_MAGIC(nnpci);
	kfree(nnpci);

	return 0;
}

#ifdef ECM_STATE_OUTPUT_ENABLE
/*
 * ecm_sfe_non_ported_ipv6_connection_state_get()
 *	Return the state of this Non ported front end instance
 */
static int ecm_sfe_non_ported_ipv6_connection_state_get(struct ecm_front_end_connection_instance *feci, struct ecm_state_file_instance *sfi)
{
	int result;
	bool can_accel;
	ecm_front_end_acceleration_mode_t accel_mode;
	struct ecm_front_end_connection_mode_stats stats;
	struct ecm_sfe_non_ported_ipv6_connection_instance *nnpci = (struct ecm_sfe_non_ported_ipv6_connection_instance *)feci;

	DEBUG_CHECK_MAGIC(nnpci, ECM_SFE_NON_PORTED_IPV6_CONNECTION_INSTANCE_MAGIC, "%p: magic failed", nnpci);

	/*
	 * Snapshot the state under the lock, then emit it lock-free: the
	 * ecm_state_write() calls below may block/allocate, so they must not
	 * run with feci->lock held.
	 */
	spin_lock_bh(&feci->lock);
	can_accel = feci->can_accel;
	accel_mode = feci->accel_mode;
	memcpy(&stats, &feci->stats, sizeof(struct ecm_front_end_connection_mode_stats));
	spin_unlock_bh(&feci->lock);

	if ((result = ecm_state_prefix_add(sfi, "front_end_v6.non_ported"))) {
		return result;
	}

	if ((result = ecm_state_write(sfi, "can_accel", "%d", can_accel))) {
		return result;
	}
	if ((result = ecm_state_write(sfi, "accel_mode", "%d", accel_mode))) {
		return result;
	}
	if ((result = ecm_state_write(sfi, "decelerate_pending", "%d", stats.decelerate_pending))) {
		return result;
	}
	if ((result = ecm_state_write(sfi, "flush_happened_total", "%d", stats.flush_happened_total))) {
		return result;
	}
	if ((result = ecm_state_write(sfi, "no_action_seen_total", "%d", stats.no_action_seen_total))) {
		return result;
	}
	if ((result = ecm_state_write(sfi, "no_action_seen", "%d", stats.no_action_seen))) {
		return result;
	}
	if ((result = ecm_state_write(sfi, "no_action_seen_limit", "%d", stats.no_action_seen_limit))) {
		return result;
	}
	if ((result = ecm_state_write(sfi, "driver_fail_total", "%d", stats.driver_fail_total))) {
		return result;
	}
	if ((result = ecm_state_write(sfi, "driver_fail", "%d", stats.driver_fail))) {
		return result;
	}
	if ((result = ecm_state_write(sfi, "driver_fail_limit", "%d", stats.driver_fail_limit))) {
		return result;
	}
	if ((result = ecm_state_write(sfi, "ae_nack_total", "%d", stats.ae_nack_total))) {
		return result;
	}
	if ((result = ecm_state_write(sfi, "ae_nack", "%d", stats.ae_nack))) {
		return result;
	}
	if ((result = ecm_state_write(sfi, "ae_nack_limit", "%d", stats.ae_nack_limit))) {
		return result;
	}

	/* Pops the "front_end_v6.non_ported" prefix pushed above */
	return ecm_state_prefix_remove(sfi);
}
#endif

/*
 * ecm_sfe_non_ported_ipv6_connection_instance_alloc()
 *	Create a front end instance specific for non-ported connection
 *
 * Returns the new instance with refs = 1 owned by the caller, or NULL on
 * allocation failure.  GFP_ATOMIC is used because this runs from the packet
 * processing path.
 */
static struct ecm_sfe_non_ported_ipv6_connection_instance *ecm_sfe_non_ported_ipv6_connection_instance_alloc(
								struct ecm_db_connection_instance *ci,
								bool can_accel)
{
	struct ecm_sfe_non_ported_ipv6_connection_instance *nnpci;
	struct ecm_front_end_connection_instance *feci;

	nnpci = (struct ecm_sfe_non_ported_ipv6_connection_instance *)kzalloc(sizeof(struct ecm_sfe_non_ported_ipv6_connection_instance), GFP_ATOMIC | __GFP_NOWARN);
	if (!nnpci) {
		DEBUG_WARN("Non-Ported Front end alloc failed\n");
		return NULL;
	}

	/*
	 * Refs is 1 for the creator of the connection
	 */
	feci = (struct ecm_front_end_connection_instance *)nnpci;
	feci->refs = 1;
	DEBUG_SET_MAGIC(nnpci, ECM_SFE_NON_PORTED_IPV6_CONNECTION_INSTANCE_MAGIC);
	spin_lock_init(&feci->lock);

	feci->can_accel = can_accel;
	feci->accel_mode = (can_accel) ? ECM_FRONT_END_ACCELERATION_MODE_DECEL : ECM_FRONT_END_ACCELERATION_MODE_FAIL_DENIED;
	/* Limits are copied from the module-wide defaults under the global lock */
	spin_lock_bh(&ecm_sfe_ipv6_lock);
	feci->stats.no_action_seen_limit = ecm_sfe_ipv6_no_action_limit_default;
	feci->stats.driver_fail_limit = ecm_sfe_ipv6_driver_fail_limit_default;
	feci->stats.ae_nack_limit = ecm_sfe_ipv6_nack_limit_default;
	spin_unlock_bh(&ecm_sfe_ipv6_lock);

	/*
	 * Copy reference to connection - no need to ref ci as ci maintains a ref to this instance instead (this instance persists for as long as ci does)
	 */
	feci->ci = ci;

	/*
	 * Populate the methods and callbacks
	 */
	feci->ref = ecm_sfe_non_ported_ipv6_connection_ref;
	feci->deref = ecm_sfe_non_ported_ipv6_connection_deref;
	feci->decelerate = ecm_sfe_non_ported_ipv6_connection_decelerate;
	feci->accel_state_get = ecm_sfe_non_ported_ipv6_connection_accel_state_get;
	feci->action_seen = ecm_sfe_non_ported_ipv6_connection_action_seen;
	feci->accel_ceased = ecm_sfe_non_ported_ipv6_connection_accel_ceased;
#ifdef ECM_STATE_OUTPUT_ENABLE
	feci->state_get = ecm_sfe_non_ported_ipv6_connection_state_get;
#endif
	feci->ae_interface_number_by_dev_get = ecm_sfe_common_get_interface_number_by_dev;

	return nnpci;
}

/*
 * ecm_sfe_non_ported_ipv6_process()
 *	Process a protocol that does not have port based identifiers
 */
unsigned int ecm_sfe_non_ported_ipv6_process(struct net_device *out_dev,
							struct net_device *in_dev,
							uint8_t *src_node_addr,
							uint8_t *dest_node_addr,
							bool can_accel, bool is_routed, bool is_l2_encap, struct sk_buff *skb,
							struct ecm_tracker_ip_header *ip_hdr,
							struct nf_conn *ct, ecm_tracker_sender_type_t sender, ecm_db_direction_t ecm_dir,
							struct nf_conntrack_tuple *orig_tuple, struct nf_conntrack_tuple *reply_tuple,
							ip_addr_t ip_src_addr, ip_addr_t ip_dest_addr)
{
	struct ecm_db_connection_instance *ci;
	int protocol;
	int src_port;
	int dest_port;
	ip_addr_t match_addr;
	struct ecm_classifier_instance *assignments[ECM_CLASSIFIER_TYPES];
	int aci_index;
	int assignment_count;
	ecm_db_timer_group_t ci_orig_timer_group;
	struct ecm_classifier_process_response prevalent_pr;

	DEBUG_TRACE("Non-ported protocol src: " ECM_IP_ADDR_OCTAL_FMT ", dest: " ECM_IP_ADDR_OCTAL_FMT "\n",
				ECM_IP_ADDR_TO_OCTAL(ip_src_addr), ECM_IP_ADDR_TO_OCTAL(ip_dest_addr));

	/*
	 * Look up a connection.
	 */
	protocol = (int)orig_tuple->dst.protonum;
	if ((protocol == IPPROTO_IPIP)) {
		src_port = 0;
		dest_port = 0;
	} else {
		/*
		 * Do not accelerate the non-ported connections except the ones we handle.
		 */
		can_accel = false;

		/*
		 * port numbers are just the negative protocol number equivalents for now.
		 * GGG They could eventually be used as protocol specific identifiers such as icmp id's etc.
1651 */ 1652 src_port = -protocol; 1653 dest_port = -protocol; 1654 } 1655 ci = ecm_db_connection_find_and_ref(ip_src_addr, ip_dest_addr, protocol, src_port, dest_port); 1656 1657 /* 1658 * If there is no existing connection then create a new one. 1659 */ 1660 if (unlikely(!ci)) { 1661 struct ecm_db_mapping_instance *src_mi; 1662 struct ecm_db_mapping_instance *dest_mi; 1663 struct ecm_db_node_instance *src_ni; 1664 struct ecm_db_node_instance *dest_ni; 1665 struct ecm_classifier_default_instance *dci; 1666 struct ecm_front_end_connection_instance *feci; 1667 struct ecm_db_connection_instance *nci; 1668 ecm_classifier_type_t classifier_type; 1669 int32_t to_list_first; 1670 struct ecm_db_iface_instance *to_list[ECM_DB_IFACE_HEIRARCHY_MAX]; 1671 int32_t from_list_first; 1672 struct ecm_db_iface_instance *from_list[ECM_DB_IFACE_HEIRARCHY_MAX]; 1673 1674 DEBUG_INFO("New connection from " ECM_IP_ADDR_OCTAL_FMT " to " ECM_IP_ADDR_OCTAL_FMT "\n", ECM_IP_ADDR_TO_OCTAL(ip_src_addr), ECM_IP_ADDR_TO_OCTAL(ip_dest_addr)); 1675 1676 /* 1677 * Before we attempt to create the connection are we being terminated? 1678 */ 1679 spin_lock_bh(&ecm_sfe_ipv6_lock); 1680 if (ecm_sfe_ipv6_terminate_pending) { 1681 spin_unlock_bh(&ecm_sfe_ipv6_lock); 1682 DEBUG_WARN("Terminating\n"); 1683 1684 /* 1685 * As we are terminating we just allow the packet to pass - it's no longer our concern 1686 */ 1687 return NF_ACCEPT; 1688 } 1689 spin_unlock_bh(&ecm_sfe_ipv6_lock); 1690 1691 /* 1692 * Does this connection have a conntrack entry? 1693 */ 1694 if (ct) { 1695 unsigned int conn_count; 1696 1697 /* 1698 * If we have exceeded the connection limit (according to conntrack) then abort 1699 * NOTE: Conntrack, when at its limit, will destroy a connection to make way for a new. 1700 * Conntrack won't exceed its limit but ECM can due to it needing to hold connections while 1701 * acceleration commands are in-flight. 
1702 * This means that ECM can 'fall behind' somewhat with the connection state wrt conntrack connection state. 1703 * This is not seen as an issue since conntrack will have issued us with a destroy event for the flushed connection(s) 1704 * and we will eventually catch up. 1705 * Since ECM is capable of handling connections mid-flow ECM will pick up where it can. 1706 */ 1707 conn_count = (unsigned int)ecm_db_connection_count_get(); 1708 if (conn_count >= nf_conntrack_max) { 1709 DEBUG_WARN("ECM Connection count limit reached: db: %u, ct: %u\n", conn_count, nf_conntrack_max); 1710 return NF_ACCEPT; 1711 } 1712 } 1713 1714 /* 1715 * Now allocate the new connection 1716 */ 1717 nci = ecm_db_connection_alloc(); 1718 if (!nci) { 1719 DEBUG_WARN("Failed to allocate connection\n"); 1720 return NF_ACCEPT; 1721 } 1722 1723 /* 1724 * Connection must have a front end instance associated with it 1725 */ 1726 feci = (struct ecm_front_end_connection_instance *)ecm_sfe_non_ported_ipv6_connection_instance_alloc(nci, can_accel); 1727 if (!feci) { 1728 ecm_db_connection_deref(nci); 1729 DEBUG_WARN("Failed to allocate front end\n"); 1730 return NF_ACCEPT; 1731 } 1732 1733 /* 1734 * Get the src and destination mappings 1735 * For this we also need the interface lists which we also set upon the new connection while we are at it. 1736 * GGG TODO rework terms of "src/dest" - these need to be named consistently as from/to as per database terms. 1737 * GGG TODO The empty list checks should not be needed, mapping_establish_and_ref() should fail out if there is no list anyway. 
1738 */ 1739 DEBUG_TRACE("%p: Create the 'from' interface heirarchy list\n", nci); 1740 from_list_first = ecm_interface_heirarchy_construct(feci, from_list, ip_dest_addr, ip_src_addr, 6, protocol, in_dev, is_routed, in_dev, src_node_addr, dest_node_addr, NULL); 1741 if (from_list_first == ECM_DB_IFACE_HEIRARCHY_MAX) { 1742 feci->deref(feci); 1743 ecm_db_connection_deref(nci); 1744 DEBUG_WARN("Failed to obtain 'from' heirarchy list\n"); 1745 return NF_ACCEPT; 1746 } 1747 ecm_db_connection_from_interfaces_reset(nci, from_list, from_list_first); 1748 1749 DEBUG_TRACE("%p: Create source node\n", nci); 1750 src_ni = ecm_sfe_ipv6_node_establish_and_ref(feci, in_dev, ip_src_addr, from_list, from_list_first, src_node_addr); 1751 ecm_db_connection_interfaces_deref(from_list, from_list_first); 1752 if (!src_ni) { 1753 feci->deref(feci); 1754 ecm_db_connection_deref(nci); 1755 DEBUG_WARN("Failed to establish source node\n"); 1756 return NF_ACCEPT; 1757 } 1758 1759 DEBUG_TRACE("%p: Create source mapping\n", nci); 1760 src_mi = ecm_sfe_ipv6_mapping_establish_and_ref(ip_src_addr, src_port); 1761 if (!src_mi) { 1762 ecm_db_node_deref(src_ni); 1763 feci->deref(feci); 1764 ecm_db_connection_deref(nci); 1765 DEBUG_WARN("Failed to establish src mapping\n"); 1766 return NF_ACCEPT; 1767 } 1768 1769 DEBUG_TRACE("%p: Create the 'to' interface heirarchy list\n", nci); 1770 to_list_first = ecm_interface_heirarchy_construct(feci, to_list, ip_src_addr, ip_dest_addr, 6, protocol, out_dev, is_routed, in_dev, dest_node_addr, src_node_addr, NULL); 1771 if (to_list_first == ECM_DB_IFACE_HEIRARCHY_MAX) { 1772 ecm_db_mapping_deref(src_mi); 1773 ecm_db_node_deref(src_ni); 1774 feci->deref(feci); 1775 ecm_db_connection_deref(nci); 1776 DEBUG_WARN("Failed to obtain 'to' heirarchy list\n"); 1777 return NF_ACCEPT; 1778 } 1779 ecm_db_connection_to_interfaces_reset(nci, to_list, to_list_first); 1780 1781 DEBUG_TRACE("%p: Create dest node\n", nci); 1782 dest_ni = ecm_sfe_ipv6_node_establish_and_ref(feci, 
out_dev, ip_dest_addr, to_list, to_list_first, dest_node_addr); 1783 ecm_db_connection_interfaces_deref(to_list, to_list_first); 1784 if (!dest_ni) { 1785 ecm_db_mapping_deref(src_mi); 1786 ecm_db_node_deref(src_ni); 1787 feci->deref(feci); 1788 ecm_db_connection_deref(nci); 1789 DEBUG_WARN("Failed to establish dest node\n"); 1790 return NF_ACCEPT; 1791 } 1792 1793 DEBUG_TRACE("%p: Create dest mapping\n", nci); 1794 dest_mi = ecm_sfe_ipv6_mapping_establish_and_ref(ip_dest_addr, dest_port); 1795 if (!dest_mi) { 1796 ecm_db_node_deref(dest_ni); 1797 ecm_db_mapping_deref(src_mi); 1798 ecm_db_node_deref(src_ni); 1799 feci->deref(feci); 1800 ecm_db_connection_deref(nci); 1801 DEBUG_WARN("Failed to establish dest mapping\n"); 1802 return NF_ACCEPT; 1803 } 1804 1805 /* 1806 * Every connection also needs a default classifier 1807 */ 1808 dci = ecm_classifier_default_instance_alloc(nci, protocol, ecm_dir, src_port, dest_port); 1809 if (!dci) { 1810 ecm_db_mapping_deref(dest_mi); 1811 ecm_db_node_deref(dest_ni); 1812 ecm_db_mapping_deref(src_mi); 1813 ecm_db_node_deref(src_ni); 1814 feci->deref(feci); 1815 ecm_db_connection_deref(nci); 1816 DEBUG_WARN("Failed to allocate default classifier\n"); 1817 return NF_ACCEPT; 1818 } 1819 ecm_db_connection_classifier_assign(nci, (struct ecm_classifier_instance *)dci); 1820 1821 /* 1822 * Every connection starts with a full complement of classifiers assigned. 
1823 * NOTE: Default classifier is a special case considered previously 1824 */ 1825 for (classifier_type = ECM_CLASSIFIER_TYPE_DEFAULT + 1; classifier_type < ECM_CLASSIFIER_TYPES; ++classifier_type) { 1826 struct ecm_classifier_instance *aci = ecm_sfe_ipv6_assign_classifier(nci, classifier_type); 1827 if (aci) { 1828 aci->deref(aci); 1829 } else { 1830 dci->base.deref((struct ecm_classifier_instance *)dci); 1831 ecm_db_mapping_deref(dest_mi); 1832 ecm_db_node_deref(dest_ni); 1833 ecm_db_mapping_deref(src_mi); 1834 ecm_db_node_deref(src_ni); 1835 feci->deref(feci); 1836 ecm_db_connection_deref(nci); 1837 DEBUG_WARN("Failed to allocate classifiers assignments\n"); 1838 return NF_ACCEPT; 1839 } 1840 } 1841 1842 /* 1843 * Now add the connection into the database. 1844 * NOTE: In an SMP situation such as ours there is a possibility that more than one packet for the same 1845 * connection is being processed simultaneously. 1846 * We *could* end up creating more than one connection instance for the same actual connection. 1847 * To guard against this we now perform a mutex'd lookup of the connection + add once more - another cpu may have created it before us. 1848 */ 1849 spin_lock_bh(&ecm_sfe_ipv6_lock); 1850 ci = ecm_db_connection_find_and_ref(ip_src_addr, ip_dest_addr, protocol, src_port, dest_port); 1851 if (ci) { 1852 /* 1853 * Another cpu created the same connection before us - use the one we just found 1854 */ 1855 spin_unlock_bh(&ecm_sfe_ipv6_lock); 1856 ecm_db_connection_deref(nci); 1857 } else { 1858 struct ecm_tracker_instance *ti; 1859 ecm_db_timer_group_t tg; 1860 ecm_tracker_sender_state_t src_state; 1861 ecm_tracker_sender_state_t dest_state; 1862 ecm_tracker_connection_state_t state; 1863 1864 /* 1865 * Ask tracker for timer group to set the connection to initially. 
1866 */ 1867 ti = dci->tracker_get_and_ref(dci); 1868 ti->state_get(ti, &src_state, &dest_state, &state, &tg); 1869 ti->deref(ti); 1870 1871 /* 1872 * Add the new connection we created into the database 1873 * NOTE: assign to a short timer group for now - it is the assigned classifiers responsibility to do this 1874 */ 1875 ecm_db_connection_add(nci, feci, src_mi, dest_mi, src_mi, dest_mi, 1876 src_ni, dest_ni, src_ni, dest_ni, 1877 6, protocol, ecm_dir, 1878 NULL /* final callback */, 1879 ecm_sfe_non_ported_ipv6_connection_defunct_callback, 1880 tg, is_routed, nci); 1881 1882 spin_unlock_bh(&ecm_sfe_ipv6_lock); 1883 1884 ci = nci; 1885 DEBUG_INFO("%p: New Non-ported protocol %d connection created\n", ci, protocol); 1886 } 1887 1888 /* 1889 * No longer need referenecs to the objects we created 1890 */ 1891 dci->base.deref((struct ecm_classifier_instance *)dci); 1892 ecm_db_mapping_deref(dest_mi); 1893 ecm_db_node_deref(dest_ni); 1894 ecm_db_mapping_deref(src_mi); 1895 ecm_db_node_deref(src_ni); 1896 feci->deref(feci); 1897 } 1898 1899 /* 1900 * Keep connection alive as we have seen activity 1901 */ 1902 if (!ecm_db_connection_defunct_timer_touch(ci)) { 1903 ecm_db_connection_deref(ci); 1904 return NF_ACCEPT; 1905 } 1906 1907 /* 1908 * Identify which side of the connection is sending 1909 * NOTE: This may be different than what sender is at the moment 1910 * given the connection we have located. 1911 */ 1912 ecm_db_connection_from_address_get(ci, match_addr); 1913 if (ECM_IP_ADDR_MATCH(ip_src_addr, match_addr)) { 1914 sender = ECM_TRACKER_SENDER_TYPE_SRC; 1915 } else { 1916 sender = ECM_TRACKER_SENDER_TYPE_DEST; 1917 } 1918 1919 /* 1920 * Do we need to action generation change? 1921 */ 1922 if (unlikely(ecm_db_connection_regeneration_required_check(ci))) { 1923 ecm_sfe_ipv6_connection_regenerate(ci, sender, out_dev, in_dev); 1924 } 1925 1926 /* 1927 * Iterate the assignments and call to process! 1928 * Policy implemented: 1929 * 1. 
Classifiers that say they are not relevant are unassigned and not actioned further. 1930 * 2. Any drop command from any classifier is honoured. 1931 * 3. Accel is never allowed for non-ported type connections. 1932 * 4. Only the highest priority classifier, that actions it, will have its qos tag honoured. 1933 * 5. Only the highest priority classifier, that actions it, will have its timer group honoured. 1934 */ 1935 DEBUG_TRACE("%p: process begin, skb: %p\n", ci, skb); 1936 prevalent_pr.process_actions = 0; 1937 prevalent_pr.drop = false; 1938 prevalent_pr.flow_qos_tag = skb->priority; 1939 prevalent_pr.return_qos_tag = skb->priority; 1940 prevalent_pr.accel_mode = ECM_CLASSIFIER_ACCELERATION_MODE_ACCEL; 1941 prevalent_pr.timer_group = ci_orig_timer_group = ecm_db_connection_timer_group_get(ci); 1942 1943 assignment_count = ecm_db_connection_classifier_assignments_get_and_ref(ci, assignments); 1944 for (aci_index = 0; aci_index < assignment_count; ++aci_index) { 1945 struct ecm_classifier_process_response aci_pr; 1946 struct ecm_classifier_instance *aci; 1947 1948 aci = assignments[aci_index]; 1949 DEBUG_TRACE("%p: process: %p, type: %d\n", ci, aci, aci->type_get(aci)); 1950 aci->process(aci, sender, ip_hdr, skb, &aci_pr); 1951 DEBUG_TRACE("%p: aci_pr: process actions: %x, became relevant: %u, relevance: %d, drop: %d, " 1952 "flow_qos_tag: %u, return_qos_tag: %u, accel_mode: %x, timer_group: %d\n", 1953 ci, aci_pr.process_actions, aci_pr.became_relevant, aci_pr.relevance, aci_pr.drop, 1954 aci_pr.flow_qos_tag, aci_pr.return_qos_tag, aci_pr.accel_mode, aci_pr.timer_group); 1955 1956 if (aci_pr.relevance == ECM_CLASSIFIER_RELEVANCE_NO) { 1957 ecm_classifier_type_t aci_type; 1958 1959 /* 1960 * This classifier can be unassigned - PROVIDED it is not the default classifier 1961 */ 1962 aci_type = aci->type_get(aci); 1963 if (aci_type == ECM_CLASSIFIER_TYPE_DEFAULT) { 1964 continue; 1965 } 1966 1967 DEBUG_INFO("%p: Classifier not relevant, unassign: %d", ci, aci_type); 
1968 ecm_db_connection_classifier_unassign(ci, aci); 1969 continue; 1970 } 1971 1972 /* 1973 * Yes or Maybe relevant. 1974 */ 1975 if (aci_pr.process_actions & ECM_CLASSIFIER_PROCESS_ACTION_DROP) { 1976 /* 1977 * Drop command from any classifier is actioned. 1978 */ 1979 DEBUG_TRACE("%p: wants drop: %p, type: %d, skb: %p\n", ci, aci, aci->type_get(aci), skb); 1980 prevalent_pr.drop |= aci_pr.drop; 1981 } 1982 1983 /* 1984 * Accel mode permission 1985 */ 1986 if (aci_pr.relevance == ECM_CLASSIFIER_RELEVANCE_MAYBE) { 1987 /* 1988 * Classifier not sure of its relevance - cannot accel yet 1989 */ 1990 DEBUG_TRACE("%p: accel denied by maybe: %p, type: %d\n", ci, aci, aci->type_get(aci)); 1991 prevalent_pr.accel_mode = ECM_CLASSIFIER_ACCELERATION_MODE_NO; 1992 } else { 1993 if (aci_pr.process_actions & ECM_CLASSIFIER_PROCESS_ACTION_ACCEL_MODE) { 1994 if (aci_pr.accel_mode == ECM_CLASSIFIER_ACCELERATION_MODE_NO) { 1995 DEBUG_TRACE("%p: accel denied: %p, type: %d\n", ci, aci, aci->type_get(aci)); 1996 prevalent_pr.accel_mode = ECM_CLASSIFIER_ACCELERATION_MODE_NO; 1997 } 1998 /* else yes or don't care about accel */ 1999 } 2000 } 2001 2002 /* 2003 * Timer group (the last classifier i.e. the highest priority one) will 'win' 2004 */ 2005 if (aci_pr.process_actions & ECM_CLASSIFIER_PROCESS_ACTION_TIMER_GROUP) { 2006 DEBUG_TRACE("%p: timer group: %p, type: %d, group: %d\n", ci, aci, aci->type_get(aci), aci_pr.timer_group); 2007 prevalent_pr.timer_group = aci_pr.timer_group; 2008 } 2009 2010 /* 2011 * Qos tag (the last classifier i.e. 
the highest priority one) will 'win' 2012 */ 2013 if (aci_pr.process_actions & ECM_CLASSIFIER_PROCESS_ACTION_QOS_TAG) { 2014 DEBUG_TRACE("%p: aci: %p, type: %d, flow qos tag: %u, return qos tag: %u\n", 2015 ci, aci, aci->type_get(aci), aci_pr.flow_qos_tag, aci_pr.return_qos_tag); 2016 prevalent_pr.flow_qos_tag = aci_pr.flow_qos_tag; 2017 prevalent_pr.return_qos_tag = aci_pr.return_qos_tag; 2018 } 2019 2020#ifdef ECM_CLASSIFIER_DSCP_ENABLE 2021 /* 2022 * If any classifier denied DSCP remarking then that overrides every classifier 2023 */ 2024 if (aci_pr.process_actions & ECM_CLASSIFIER_PROCESS_ACTION_DSCP_DENY) { 2025 DEBUG_TRACE("%p: aci: %p, type: %d, DSCP remark denied\n", 2026 ci, aci, aci->type_get(aci)); 2027 prevalent_pr.process_actions |= ECM_CLASSIFIER_PROCESS_ACTION_DSCP_DENY; 2028 prevalent_pr.process_actions &= ~ECM_CLASSIFIER_PROCESS_ACTION_DSCP; 2029 } 2030 2031 /* 2032 * DSCP remark action, but only if it has not been denied by any classifier 2033 */ 2034 if (aci_pr.process_actions & ECM_CLASSIFIER_PROCESS_ACTION_DSCP) { 2035 if (!(prevalent_pr.process_actions & ECM_CLASSIFIER_PROCESS_ACTION_DSCP_DENY)) { 2036 DEBUG_TRACE("%p: aci: %p, type: %d, DSCP remark wanted, flow_dscp: %u, return dscp: %u\n", 2037 ci, aci, aci->type_get(aci), aci_pr.flow_dscp, aci_pr.return_dscp); 2038 prevalent_pr.process_actions |= ECM_CLASSIFIER_PROCESS_ACTION_DSCP; 2039 prevalent_pr.flow_dscp = aci_pr.flow_dscp; 2040 prevalent_pr.return_dscp = aci_pr.return_dscp; 2041 } 2042 } 2043#endif 2044 } 2045 ecm_db_connection_assignments_release(assignment_count, assignments); 2046 2047 /* 2048 * Change timer group? 2049 */ 2050 if (ci_orig_timer_group != prevalent_pr.timer_group) { 2051 DEBUG_TRACE("%p: change timer group from: %d to: %d\n", ci, ci_orig_timer_group, prevalent_pr.timer_group); 2052 ecm_db_connection_defunct_timer_reset(ci, prevalent_pr.timer_group); 2053 } 2054 2055 /* 2056 * Drop? 
2057 */ 2058 if (prevalent_pr.drop) { 2059 DEBUG_TRACE("%p: drop: %p\n", ci, skb); 2060 ecm_db_connection_data_totals_update_dropped(ci, (sender == ECM_TRACKER_SENDER_TYPE_SRC)? true : false, skb->len, 1); 2061 ecm_db_connection_deref(ci); 2062 return NF_ACCEPT; 2063 } 2064 ecm_db_connection_data_totals_update(ci, (sender == ECM_TRACKER_SENDER_TYPE_SRC)? true : false, skb->len, 1); 2065 2066 /* 2067 * Assign qos tag 2068 * GGG TODO Should we use sender to identify whether to use flow or return qos tag? 2069 */ 2070 skb->priority = prevalent_pr.flow_qos_tag; 2071 DEBUG_TRACE("%p: skb priority: %u\n", ci, skb->priority); 2072 2073 /* 2074 * Accelerate? 2075 */ 2076 if (prevalent_pr.accel_mode == ECM_CLASSIFIER_ACCELERATION_MODE_ACCEL) { 2077 struct ecm_front_end_connection_instance *feci; 2078 DEBUG_TRACE("%p: accel\n", ci); 2079 feci = ecm_db_connection_front_end_get_and_ref(ci); 2080 ecm_sfe_non_ported_ipv6_connection_accelerate(feci, &prevalent_pr, is_l2_encap); 2081 feci->deref(feci); 2082 } 2083 ecm_db_connection_deref(ci); 2084 2085 return NF_ACCEPT; 2086} 2087 2088/* 2089 * ecm_sfe_non_ported_ipv6_debugfs_init() 2090 */ 2091bool ecm_sfe_non_ported_ipv6_debugfs_init(struct dentry *dentry) 2092{ 2093 if (!debugfs_create_u32("non_ported_accelerated_count", S_IRUGO, dentry, 2094 (u32 *)&ecm_sfe_non_ported_ipv6_accelerated_count)) { 2095 DEBUG_ERROR("Failed to create ecm sfe ipv6 non_ported_accelerated_count file in debugfs\n"); 2096 return false; 2097 } 2098 2099 return true; 2100} 2101