1/* 2 ************************************************************************** 3 * Copyright (c) 2014-2015 The Linux Foundation. All rights reserved. 4 * Permission to use, copy, modify, and/or distribute this software for 5 * any purpose with or without fee is hereby granted, provided that the 6 * above copyright notice and this permission notice appear in all copies. 7 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 8 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 9 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR 10 * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 11 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN 12 * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT 13 * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 14 ************************************************************************** 15 */ 16 17#include <linux/version.h> 18#include <linux/types.h> 19#include <linux/ip.h> 20#include <linux/tcp.h> 21#include <linux/module.h> 22#include <linux/skbuff.h> 23#include <linux/icmp.h> 24#include <linux/debugfs.h> 25#include <linux/kthread.h> 26#include <linux/pkt_sched.h> 27#include <linux/string.h> 28#include <net/ip6_route.h> 29#include <net/ip6_fib.h> 30#include <net/addrconf.h> 31#include <net/ipv6.h> 32#include <net/tcp.h> 33#include <asm/unaligned.h> 34#include <asm/uaccess.h> /* for put_user */ 35#include <net/ipv6.h> 36#include <linux/inet.h> 37#include <linux/in6.h> 38#include <linux/udp.h> 39#include <linux/tcp.h> 40#include <linux/inetdevice.h> 41#include <linux/if_arp.h> 42#include <linux/netfilter_ipv6.h> 43#include <linux/netfilter_bridge.h> 44#include <linux/if_bridge.h> 45#include <net/arp.h> 46#include <net/netfilter/nf_conntrack.h> 47#include <net/netfilter/nf_conntrack_acct.h> 48#include <net/netfilter/nf_conntrack_helper.h> 49#include <net/netfilter/nf_conntrack_l4proto.h> 
50#include <net/netfilter/nf_conntrack_l3proto.h> 51#include <net/netfilter/nf_conntrack_zones.h> 52#include <net/netfilter/nf_conntrack_core.h> 53#include <net/netfilter/ipv6/nf_conntrack_ipv6.h> 54#include <net/netfilter/ipv6/nf_defrag_ipv6.h> 55#ifdef ECM_INTERFACE_VLAN_ENABLE 56#include <linux/../../net/8021q/vlan.h> 57#include <linux/if_vlan.h> 58#endif 59 60/* 61 * Debug output levels 62 * 0 = OFF 63 * 1 = ASSERTS / ERRORS 64 * 2 = 1 + WARN 65 * 3 = 2 + INFO 66 * 4 = 3 + TRACE 67 */ 68#define DEBUG_LEVEL ECM_NSS_PORTED_IPV6_DEBUG_LEVEL 69 70#include <nss_api_if.h> 71 72#include "ecm_types.h" 73#include "ecm_db_types.h" 74#include "ecm_state.h" 75#include "ecm_tracker.h" 76#include "ecm_classifier.h" 77#include "ecm_front_end_types.h" 78#include "ecm_tracker_datagram.h" 79#include "ecm_tracker_udp.h" 80#include "ecm_tracker_tcp.h" 81#include "ecm_db.h" 82#include "ecm_classifier_default.h" 83#include "ecm_interface.h" 84#include "ecm_nss_ported_ipv6.h" 85#include "ecm_nss_ipv6.h" 86#include "ecm_nss_common.h" 87 88/* 89 * Magic numbers 90 */ 91#define ECM_NSS_PORTED_IPV6_CONNECTION_INSTANCE_MAGIC 0xEB12 92 93/* 94 * Protocol type that ported file supports. 
95 */ 96enum ecm_nss_ported_ipv6_proto_types { 97 ECM_NSS_PORTED_IPV6_PROTO_TCP = 0, 98 ECM_NSS_PORTED_IPV6_PROTO_UDP, 99 ECM_NSS_PORTED_IPV6_PROTO_MAX 100 101}; 102 103/* 104 * struct ecm_nss_ipv6_ported_connection_instance 105 * A connection specific front end instance for PORTED connections 106 */ 107struct ecm_nss_ported_ipv6_connection_instance { 108 struct ecm_front_end_connection_instance base; /* Base class */ 109 uint8_t ported_accelerated_count_index; /* Index value of accelerated count array (UDP or TCP) */ 110#if (DEBUG_LEVEL > 0) 111 uint16_t magic; 112#endif 113}; 114 115static int ecm_nss_ported_ipv6_accelerated_count[ECM_NSS_PORTED_IPV6_PROTO_MAX] = {0}; 116 /* Array of Number of TCP and UDP connections currently offloaded */ 117 118/* 119 * Expose what should be a static flag in the TCP connection tracker. 120 */ 121#ifdef ECM_OPENWRT_SUPPORT 122extern int nf_ct_tcp_no_window_check; 123#endif 124extern int nf_ct_tcp_be_liberal; 125 126/* 127 * ecm_nss_ported_ipv6_connection_callback() 128 * Callback for handling create ack/nack calls. 129 */ 130static void ecm_nss_ported_ipv6_connection_callback(void *app_data, struct nss_ipv6_msg *nim) 131{ 132 struct nss_ipv6_rule_create_msg *nircm = &nim->msg.rule_create; 133 uint32_t serial = (uint32_t)app_data; 134 struct ecm_db_connection_instance *ci; 135 struct ecm_front_end_connection_instance *feci; 136 struct ecm_nss_ported_ipv6_connection_instance *npci; 137 ip_addr_t flow_ip; 138 ip_addr_t return_ip; 139 ecm_front_end_acceleration_mode_t result_mode; 140 141 /* 142 * Is this a response to a create message? 143 */ 144 if (nim->cm.type != NSS_IPV6_TX_CREATE_RULE_MSG) { 145 DEBUG_ERROR("%p: ported create callback with improper type: %d, serial: %u\n", nim, nim->cm.type, serial); 146 return; 147 } 148 149 /* 150 * Look up ecm connection so that we can update the status. 
151 */ 152 ci = ecm_db_connection_serial_find_and_ref(serial); 153 if (!ci) { 154 DEBUG_TRACE("%p: create callback, connection not found, serial: %u\n", nim, serial); 155 return; 156 } 157 158 /* 159 * Release ref held for this ack/nack response. 160 * NOTE: It's okay to do this here, ci won't go away, because the ci is held as 161 * a result of the ecm_db_connection_serial_find_and_ref() 162 */ 163 ecm_db_connection_deref(ci); 164 165 /* 166 * Get the front end instance 167 */ 168 feci = ecm_db_connection_front_end_get_and_ref(ci); 169 npci = (struct ecm_nss_ported_ipv6_connection_instance *)feci; 170 DEBUG_CHECK_MAGIC(npci, ECM_NSS_PORTED_IPV6_CONNECTION_INSTANCE_MAGIC, "%p: magic failed", npci); 171 172 ECM_NSS_IPV6_ADDR_TO_IP_ADDR(flow_ip, nircm->tuple.flow_ip); 173 ECM_NSS_IPV6_ADDR_TO_IP_ADDR(return_ip, nircm->tuple.return_ip); 174 175 /* 176 * Record command duration 177 */ 178 ecm_nss_ipv6_accel_done_time_update(feci); 179 180 /* 181 * Dump some useful trace information. 182 */ 183 DEBUG_TRACE("%p: accelerate response for connection: %p, serial: %u\n", npci, feci->ci, serial); 184 DEBUG_TRACE("%p: rule_flags: %x, valid_flags: %x\n", npci, nircm->rule_flags, nircm->valid_flags); 185 DEBUG_TRACE("%p: flow_ip: " ECM_IP_ADDR_OCTAL_FMT ":%d\n", npci, ECM_IP_ADDR_TO_OCTAL(flow_ip), nircm->tuple.flow_ident); 186 DEBUG_TRACE("%p: return_ip: " ECM_IP_ADDR_OCTAL_FMT ":%d\n", npci, ECM_IP_ADDR_TO_OCTAL(return_ip), nircm->tuple.return_ident); 187 DEBUG_TRACE("%p: protocol: %d\n", npci, nircm->tuple.protocol); 188 189 /* 190 * Handle the creation result code. 191 */ 192 DEBUG_TRACE("%p: response: %d\n", npci, nim->cm.response); 193 if (nim->cm.response != NSS_CMN_RESPONSE_ACK) { 194 /* 195 * Creation command failed (specific reason ignored). 
196 */ 197 DEBUG_TRACE("%p: accel nack: %d\n", npci, nim->cm.error); 198 spin_lock_bh(&feci->lock); 199 DEBUG_ASSERT(feci->accel_mode == ECM_FRONT_END_ACCELERATION_MODE_ACCEL_PENDING, "%p: Unexpected mode: %d\n", ci, feci->accel_mode); 200 feci->stats.ae_nack++; 201 feci->stats.ae_nack_total++; 202 if (feci->stats.ae_nack >= feci->stats.ae_nack_limit) { 203 /* 204 * Too many NSS rejections 205 */ 206 result_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_ACCEL_ENGINE; 207 } else { 208 /* 209 * Revert to decelerated 210 */ 211 result_mode = ECM_FRONT_END_ACCELERATION_MODE_DECEL; 212 } 213 214 /* 215 * If connection is now defunct then set mode to ensure no further accel attempts occur 216 */ 217 if (feci->is_defunct) { 218 result_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DEFUNCT; 219 } 220 221 spin_lock_bh(&ecm_nss_ipv6_lock); 222 _ecm_nss_ipv6_accel_pending_clear(feci, result_mode); 223 spin_unlock_bh(&ecm_nss_ipv6_lock); 224 225 spin_unlock_bh(&feci->lock); 226 227 /* 228 * Release the connection. 229 */ 230 feci->deref(feci); 231 ecm_db_connection_deref(ci); 232 return; 233 } 234 235 spin_lock_bh(&feci->lock); 236 DEBUG_ASSERT(feci->accel_mode == ECM_FRONT_END_ACCELERATION_MODE_ACCEL_PENDING, "%p: Unexpected mode: %d\n", ci, feci->accel_mode); 237 238 /* 239 * If a flush occured before we got the ACK then our acceleration was effectively cancelled on us 240 * GGG TODO This is a workaround for a NSS message OOO quirk, this should eventually be removed. 241 */ 242 if (feci->stats.flush_happened) { 243 feci->stats.flush_happened = false; 244 245 /* 246 * Increment the no-action counter. Our connection was decelerated on us with no action occurring. 247 */ 248 feci->stats.no_action_seen++; 249 250 spin_lock_bh(&ecm_nss_ipv6_lock); 251 _ecm_nss_ipv6_accel_pending_clear(feci, ECM_FRONT_END_ACCELERATION_MODE_DECEL); 252 spin_unlock_bh(&ecm_nss_ipv6_lock); 253 254 spin_unlock_bh(&feci->lock); 255 256 /* 257 * Release the connection. 
258 */ 259 feci->deref(feci); 260 ecm_db_connection_deref(ci); 261 return; 262 } 263 264 /* 265 * Create succeeded 266 */ 267 268 /* 269 * Clear any nack count 270 */ 271 feci->stats.ae_nack = 0; 272 273 /* 274 * Clear the "accelerate pending" state and move to "accelerated" state bumping 275 * the accelerated counters to match our new state. 276 * 277 * Decelerate may have been attempted while we were "pending accel" and 278 * this function will return true if that was the case. 279 * If decelerate was pending then we need to begin deceleration :-( 280 */ 281 spin_lock_bh(&ecm_nss_ipv6_lock); 282 283 ecm_nss_ported_ipv6_accelerated_count[npci->ported_accelerated_count_index]++; /* Protocol specific counter */ 284 ecm_nss_ipv6_accelerated_count++; /* General running counter */ 285 286 if (!_ecm_nss_ipv6_accel_pending_clear(feci, ECM_FRONT_END_ACCELERATION_MODE_ACCEL)) { 287 /* 288 * Increment the no-action counter, this is reset if offload action is seen 289 */ 290 feci->stats.no_action_seen++; 291 292 spin_unlock_bh(&ecm_nss_ipv6_lock); 293 spin_unlock_bh(&feci->lock); 294 295 /* 296 * Release the connection. 297 */ 298 feci->deref(feci); 299 ecm_db_connection_deref(ci); 300 return; 301 } 302 303 DEBUG_INFO("%p: Decelerate was pending\n", ci); 304 305 spin_unlock_bh(&ecm_nss_ipv6_lock); 306 spin_unlock_bh(&feci->lock); 307 308 feci->decelerate(feci); 309 310 /* 311 * Release the connection. 
312 */ 313 feci->deref(feci); 314 ecm_db_connection_deref(ci); 315} 316 317/* 318 * ecm_nss_ported_ipv6_connection_accelerate() 319 * Accelerate a connection 320 */ 321static void ecm_nss_ported_ipv6_connection_accelerate(struct ecm_front_end_connection_instance *feci, 322 struct ecm_classifier_process_response *pr, 323 struct nf_conn *ct, bool is_l2_encap) 324{ 325 struct ecm_nss_ported_ipv6_connection_instance *npci = (struct ecm_nss_ported_ipv6_connection_instance *)feci; 326 uint16_t regen_occurrances; 327 int protocol; 328 int32_t from_ifaces_first; 329 int32_t to_ifaces_first; 330 struct ecm_db_iface_instance *from_ifaces[ECM_DB_IFACE_HEIRARCHY_MAX]; 331 struct ecm_db_iface_instance *to_ifaces[ECM_DB_IFACE_HEIRARCHY_MAX]; 332 struct ecm_db_iface_instance *from_nss_iface; 333 struct ecm_db_iface_instance *to_nss_iface; 334 int32_t from_nss_iface_id; 335 int32_t to_nss_iface_id; 336 uint8_t from_nss_iface_address[ETH_ALEN]; 337 uint8_t to_nss_iface_address[ETH_ALEN]; 338 struct nss_ipv6_msg nim; 339 struct nss_ipv6_rule_create_msg *nircm; 340 struct ecm_classifier_instance *assignments[ECM_CLASSIFIER_TYPES]; 341 int aci_index; 342 int assignment_count; 343 nss_tx_status_t nss_tx_status; 344 int32_t list_index; 345 int32_t interface_type_counts[ECM_DB_IFACE_TYPE_COUNT]; 346 bool rule_invalid; 347 ip_addr_t src_ip; 348 ip_addr_t dest_ip; 349 ecm_front_end_acceleration_mode_t result_mode; 350 351 DEBUG_CHECK_MAGIC(npci, ECM_NSS_PORTED_IPV6_CONNECTION_INSTANCE_MAGIC, "%p: magic failed", npci); 352 353 /* 354 * Get the re-generation occurrance counter of the connection. 355 * We compare it again at the end - to ensure that the rule construction has seen no generation 356 * changes during rule creation. 
357 */ 358 regen_occurrances = ecm_db_connection_regeneration_occurrances_get(feci->ci); 359 360 /* 361 * Test if acceleration is permitted 362 */ 363 if (!ecm_nss_ipv6_accel_pending_set(feci)) { 364 DEBUG_TRACE("%p: Acceleration not permitted: %p\n", feci, feci->ci); 365 return; 366 } 367 368 /* 369 * Okay construct an accel command. 370 * Initialise creation structure. 371 * NOTE: We leverage the app_data void pointer to be our 32 bit connection serial number. 372 * When we get it back we re-cast it to a uint32 and do a faster connection lookup. 373 */ 374 memset(&nim, 0, sizeof(struct nss_ipv6_msg)); 375 nss_ipv6_msg_init(&nim, NSS_IPV6_RX_INTERFACE, NSS_IPV6_TX_CREATE_RULE_MSG, 376 sizeof(struct nss_ipv6_rule_create_msg), 377 ecm_nss_ported_ipv6_connection_callback, 378 (void *)ecm_db_connection_serial_get(feci->ci)); 379 380 nircm = &nim.msg.rule_create; 381 nircm->valid_flags = 0; 382 nircm->rule_flags = 0; 383 384 /* 385 * Initialize VLAN tag information 386 */ 387 nircm->vlan_primary_rule.ingress_vlan_tag = ECM_NSS_CONNMGR_VLAN_ID_NOT_CONFIGURED; 388 nircm->vlan_primary_rule.egress_vlan_tag = ECM_NSS_CONNMGR_VLAN_ID_NOT_CONFIGURED; 389 nircm->vlan_secondary_rule.ingress_vlan_tag = ECM_NSS_CONNMGR_VLAN_ID_NOT_CONFIGURED; 390 nircm->vlan_secondary_rule.egress_vlan_tag = ECM_NSS_CONNMGR_VLAN_ID_NOT_CONFIGURED; 391 392 /* 393 * Get the interface lists of the connection, we must have at least one interface in the list to continue 394 */ 395 from_ifaces_first = ecm_db_connection_from_interfaces_get_and_ref(feci->ci, from_ifaces); 396 if (from_ifaces_first == ECM_DB_IFACE_HEIRARCHY_MAX) { 397 DEBUG_WARN("%p: Accel attempt failed - no interfaces in from_interfaces list!\n", npci); 398 goto ported_accel_bad_rule; 399 } 400 401 to_ifaces_first = ecm_db_connection_to_interfaces_get_and_ref(feci->ci, to_ifaces); 402 if (to_ifaces_first == ECM_DB_IFACE_HEIRARCHY_MAX) { 403 DEBUG_WARN("%p: Accel attempt failed - no interfaces in to_interfaces list!\n", npci); 404 
ecm_db_connection_interfaces_deref(from_ifaces, from_ifaces_first); 405 goto ported_accel_bad_rule; 406 } 407 408 /* 409 * First interface in each must be a known nss interface 410 */ 411 from_nss_iface = from_ifaces[from_ifaces_first]; 412 to_nss_iface = to_ifaces[to_ifaces_first]; 413 from_nss_iface_id = ecm_db_iface_ae_interface_identifier_get(from_nss_iface); 414 to_nss_iface_id = ecm_db_iface_ae_interface_identifier_get(to_nss_iface); 415 if ((from_nss_iface_id < 0) || (to_nss_iface_id < 0)) { 416 DEBUG_TRACE("%p: from_nss_iface_id: %d, to_nss_iface_id: %d\n", npci, from_nss_iface_id, to_nss_iface_id); 417 ecm_db_connection_interfaces_deref(from_ifaces, from_ifaces_first); 418 ecm_db_connection_interfaces_deref(to_ifaces, to_ifaces_first); 419 goto ported_accel_bad_rule; 420 } 421 422 /* 423 * New rule being created 424 */ 425 nircm->valid_flags |= NSS_IPV6_RULE_CREATE_CONN_VALID; 426 427 /* 428 * Set interface numbers involved in accelerating this connection. 429 * These are the outer facing addresses from the heirarchy interface lists we got above. 430 * These may be overridden later if we detect special interface types e.g. ipsec. 431 */ 432 nircm->conn_rule.flow_interface_num = from_nss_iface_id; 433 nircm->conn_rule.return_interface_num = to_nss_iface_id; 434 435 /* 436 * We know that each outward facing interface is known to the NSS and so this connection could be accelerated. 437 * However the lists may also specify other interesting details that must be included in the creation command, 438 * for example, ethernet MAC, VLAN tagging or PPPoE session information. 439 * We get this information by walking from the outer to the innermost interface for each list and examine the interface types. 440 * 441 * Start with the 'from' (src) side. 442 * NOTE: The lists may contain a complex heirarchy of similar type of interface e.g. multiple vlans or tunnels within tunnels. 
443 * This NSS cannot handle that - there is no way to describe this in the rule - if we see multiple types that would conflict we have to abort. 444 */ 445 DEBUG_TRACE("%p: Examine from/src heirarchy list\n", npci); 446 memset(interface_type_counts, 0, sizeof(interface_type_counts)); 447 rule_invalid = false; 448 for (list_index = from_ifaces_first; !rule_invalid && (list_index < ECM_DB_IFACE_HEIRARCHY_MAX); list_index++) { 449 struct ecm_db_iface_instance *ii; 450 ecm_db_iface_type_t ii_type; 451 char *ii_name; 452 453 ii = from_ifaces[list_index]; 454 ii_type = ecm_db_connection_iface_type_get(ii); 455 ii_name = ecm_db_interface_type_to_string(ii_type); 456 DEBUG_TRACE("%p: list_index: %d, ii: %p, type: %d (%s)\n", npci, list_index, ii, ii_type, ii_name); 457 458 /* 459 * Extract information from this interface type if it is applicable to the rule. 460 * Conflicting information may cause accel to be unsupported. 461 */ 462 switch (ii_type) { 463#ifdef ECM_INTERFACE_PPP_ENABLE 464 struct ecm_db_interface_info_pppoe pppoe_info; 465#endif 466#ifdef ECM_INTERFACE_VLAN_ENABLE 467 struct ecm_db_interface_info_vlan vlan_info; 468 uint32_t vlan_value = 0; 469 struct net_device *vlan_in_dev = NULL; 470#endif 471 case ECM_DB_IFACE_TYPE_BRIDGE: 472 DEBUG_TRACE("%p: Bridge\n", npci); 473 if (interface_type_counts[ii_type] != 0) { 474 /* 475 * Cannot cascade bridges 476 */ 477 rule_invalid = true; 478 DEBUG_TRACE("%p: Bridge - ignore additional\n", npci); 479 break; 480 } 481 ecm_db_iface_bridge_address_get(ii, from_nss_iface_address); 482 if (is_valid_ether_addr(from_nss_iface_address)) { 483 memcpy(nircm->src_mac_rule.flow_src_mac, from_nss_iface_address, ETH_ALEN); 484 nircm->src_mac_rule.mac_valid_flags |= NSS_IPV4_SRC_MAC_FLOW_VALID; 485 nircm->valid_flags |= NSS_IPV4_RULE_CREATE_SRC_MAC_VALID; 486 } 487 488 DEBUG_TRACE("%p: Bridge - mac: %pM\n", npci, from_nss_iface_address); 489 break; 490 case ECM_DB_IFACE_TYPE_ETHERNET: 491 DEBUG_TRACE("%p: Ethernet\n", npci); 492 
if (interface_type_counts[ii_type] != 0) { 493 /* 494 * Ignore additional mac addresses, these are usually as a result of address propagation 495 * from bridges down to ports etc. 496 */ 497 DEBUG_TRACE("%p: Ethernet - ignore additional\n", npci); 498 break; 499 } 500 501 /* 502 * Can only handle one MAC, the first outermost mac. 503 */ 504 ecm_db_iface_ethernet_address_get(ii, from_nss_iface_address); 505 DEBUG_TRACE("%p: Ethernet - mac: %pM\n", npci, from_nss_iface_address); 506 break; 507 case ECM_DB_IFACE_TYPE_PPPOE: 508#ifdef ECM_INTERFACE_PPP_ENABLE 509 /* 510 * More than one PPPoE in the list is not valid! 511 */ 512 if (interface_type_counts[ii_type] != 0) { 513 DEBUG_TRACE("%p: PPPoE - additional unsupported\n", npci); 514 rule_invalid = true; 515 break; 516 } 517 518 /* 519 * Copy pppoe session info to the creation structure. 520 */ 521 ecm_db_iface_pppoe_session_info_get(ii, &pppoe_info); 522 523 nircm->pppoe_rule.flow_pppoe_session_id = pppoe_info.pppoe_session_id; 524 memcpy(nircm->pppoe_rule.flow_pppoe_remote_mac, pppoe_info.remote_mac, ETH_ALEN); 525 nircm->valid_flags |= NSS_IPV6_RULE_CREATE_PPPOE_VALID; 526 527 DEBUG_TRACE("%p: PPPoE - session: %x, mac: %pM\n", npci, 528 nircm->pppoe_rule.flow_pppoe_session_id, 529 nircm->pppoe_rule.flow_pppoe_remote_mac); 530#else 531 rule_invalid = true; 532#endif 533 break; 534 case ECM_DB_IFACE_TYPE_VLAN: 535#ifdef ECM_INTERFACE_VLAN_ENABLE 536 DEBUG_TRACE("%p: VLAN\n", npci); 537 if (interface_type_counts[ii_type] > 1) { 538 /* 539 * Can only support two vlans 540 */ 541 rule_invalid = true; 542 DEBUG_TRACE("%p: VLAN - additional unsupported\n", npci); 543 break; 544 } 545 ecm_db_iface_vlan_info_get(ii, &vlan_info); 546 vlan_value = ((vlan_info.vlan_tpid << 16) | vlan_info.vlan_tag); 547 548 /* 549 * Look up the vlan device and incorporate the vlan priority into the vlan_value 550 */ 551 vlan_in_dev = dev_get_by_index(&init_net, ecm_db_iface_interface_identifier_get(ii)); 552 if (vlan_in_dev) { 553 vlan_value 
|= vlan_dev_get_egress_prio(vlan_in_dev, pr->return_qos_tag); 554 dev_put(vlan_in_dev); 555 vlan_in_dev = NULL; 556 } 557 558 /* 559 * Primary or secondary (QinQ) VLAN? 560 */ 561 if (interface_type_counts[ii_type] == 0) { 562 nircm->vlan_primary_rule.ingress_vlan_tag = vlan_value; 563 } else { 564 nircm->vlan_secondary_rule.ingress_vlan_tag = vlan_value; 565 } 566 nircm->valid_flags |= NSS_IPV6_RULE_CREATE_VLAN_VALID; 567 568 /* 569 * If we have not yet got an ethernet mac then take this one (very unlikely as mac should have been propagated to the slave (outer) device 570 */ 571 if (interface_type_counts[ECM_DB_IFACE_TYPE_ETHERNET] == 0) { 572 memcpy(from_nss_iface_address, vlan_info.address, ETH_ALEN); 573 interface_type_counts[ECM_DB_IFACE_TYPE_ETHERNET]++; 574 DEBUG_TRACE("%p: VLAN use mac: %pM\n", npci, from_nss_iface_address); 575 } 576 DEBUG_TRACE("%p: vlan tag: %x\n", npci, vlan_value); 577#else 578 rule_invalid = true; 579 DEBUG_TRACE("%p: VLAN - unsupported\n", npci); 580#endif 581 break; 582 case ECM_DB_IFACE_TYPE_IPSEC_TUNNEL: 583#ifdef ECM_INTERFACE_IPSEC_ENABLE 584 DEBUG_TRACE("%p: IPSEC\n", npci); 585 if (interface_type_counts[ii_type] != 0) { 586 /* 587 * Can only support one ipsec 588 */ 589 rule_invalid = true; 590 DEBUG_TRACE("%p: IPSEC - additional unsupported\n", npci); 591 break; 592 } 593 nircm->conn_rule.flow_interface_num = NSS_C2C_TX_INTERFACE; 594#else 595 rule_invalid = true; 596 DEBUG_TRACE("%p: IPSEC - unsupported\n", npci); 597#endif 598 break; 599 default: 600 DEBUG_TRACE("%p: Ignoring: %d (%s)\n", npci, ii_type, ii_name); 601 } 602 603 /* 604 * Seen an interface of this type 605 */ 606 interface_type_counts[ii_type]++; 607 } 608 if (rule_invalid) { 609 DEBUG_WARN("%p: from/src Rule invalid\n", npci); 610 ecm_db_connection_interfaces_deref(from_ifaces, from_ifaces_first); 611 ecm_db_connection_interfaces_deref(to_ifaces, to_ifaces_first); 612 goto ported_accel_bad_rule; 613 } 614 615 /* 616 * Now examine the TO / DEST heirarchy list 
to construct the destination part of the rule 617 */ 618 DEBUG_TRACE("%p: Examine to/dest heirarchy list\n", npci); 619 memset(interface_type_counts, 0, sizeof(interface_type_counts)); 620 rule_invalid = false; 621 for (list_index = to_ifaces_first; !rule_invalid && (list_index < ECM_DB_IFACE_HEIRARCHY_MAX); list_index++) { 622 struct ecm_db_iface_instance *ii; 623 ecm_db_iface_type_t ii_type; 624 char *ii_name; 625 626 ii = to_ifaces[list_index]; 627 ii_type = ecm_db_connection_iface_type_get(ii); 628 ii_name = ecm_db_interface_type_to_string(ii_type); 629 DEBUG_TRACE("%p: list_index: %d, ii: %p, type: %d (%s)\n", npci, list_index, ii, ii_type, ii_name); 630 631 /* 632 * Extract information from this interface type if it is applicable to the rule. 633 * Conflicting information may cause accel to be unsupported. 634 */ 635 switch (ii_type) { 636#ifdef ECM_INTERFACE_PPP_ENABLE 637 struct ecm_db_interface_info_pppoe pppoe_info; 638#endif 639#ifdef ECM_INTERFACE_VLAN_ENABLE 640 struct ecm_db_interface_info_vlan vlan_info; 641 uint32_t vlan_value = 0; 642 struct net_device *vlan_out_dev = NULL; 643#endif 644 case ECM_DB_IFACE_TYPE_BRIDGE: 645 DEBUG_TRACE("%p: Bridge\n", npci); 646 if (interface_type_counts[ii_type] != 0) { 647 /* 648 * Cannot cascade bridges 649 */ 650 rule_invalid = true; 651 DEBUG_TRACE("%p: Bridge - ignore additional\n", npci); 652 break; 653 } 654 ecm_db_iface_bridge_address_get(ii, to_nss_iface_address); 655 if (is_valid_ether_addr(to_nss_iface_address)) { 656 memcpy(nircm->src_mac_rule.return_src_mac, to_nss_iface_address, ETH_ALEN); 657 nircm->src_mac_rule.mac_valid_flags |= NSS_IPV4_SRC_MAC_RETURN_VALID; 658 nircm->valid_flags |= NSS_IPV4_RULE_CREATE_SRC_MAC_VALID; 659 } 660 661 DEBUG_TRACE("%p: Bridge - mac: %pM\n", npci, to_nss_iface_address); 662 break; 663 case ECM_DB_IFACE_TYPE_ETHERNET: 664 DEBUG_TRACE("%p: Ethernet\n", npci); 665 if (interface_type_counts[ii_type] != 0) { 666 /* 667 * Ignore additional mac addresses, these are usually as 
a result of address propagation 668 * from bridges down to ports etc. 669 */ 670 DEBUG_TRACE("%p: Ethernet - ignore additional\n", npci); 671 break; 672 } 673 674 /* 675 * Can only handle one MAC, the first outermost mac. 676 */ 677 ecm_db_iface_ethernet_address_get(ii, to_nss_iface_address); 678 DEBUG_TRACE("%p: Ethernet - mac: %pM\n", npci, to_nss_iface_address); 679 break; 680 case ECM_DB_IFACE_TYPE_PPPOE: 681#ifdef ECM_INTERFACE_PPP_ENABLE 682 /* 683 * More than one PPPoE in the list is not valid! 684 */ 685 if (interface_type_counts[ii_type] != 0) { 686 DEBUG_TRACE("%p: PPPoE - additional unsupported\n", npci); 687 rule_invalid = true; 688 break; 689 } 690 691 /* 692 * Copy pppoe session info to the creation structure. 693 */ 694 ecm_db_iface_pppoe_session_info_get(ii, &pppoe_info); 695 nircm->pppoe_rule.return_pppoe_session_id = pppoe_info.pppoe_session_id; 696 memcpy(nircm->pppoe_rule.return_pppoe_remote_mac, pppoe_info.remote_mac, ETH_ALEN); 697 nircm->valid_flags |= NSS_IPV6_RULE_CREATE_PPPOE_VALID; 698 699 DEBUG_TRACE("%p: PPPoE - session: %x, mac: %pM\n", npci, 700 nircm->pppoe_rule.return_pppoe_session_id, 701 nircm->pppoe_rule.return_pppoe_remote_mac); 702#else 703 rule_invalid = true; 704#endif 705 break; 706 case ECM_DB_IFACE_TYPE_VLAN: 707#ifdef ECM_INTERFACE_VLAN_ENABLE 708 DEBUG_TRACE("%p: VLAN\n", npci); 709 if (interface_type_counts[ii_type] > 1) { 710 /* 711 * Can only support two vlans 712 */ 713 rule_invalid = true; 714 DEBUG_TRACE("%p: VLAN - additional unsupported\n", npci); 715 break; 716 } 717 ecm_db_iface_vlan_info_get(ii, &vlan_info); 718 vlan_value = ((vlan_info.vlan_tpid << 16) | vlan_info.vlan_tag); 719 720 /* 721 * Look up the vlan device and incorporate the vlan priority into the vlan_value 722 */ 723 vlan_out_dev = dev_get_by_index(&init_net, ecm_db_iface_interface_identifier_get(ii)); 724 if (vlan_out_dev) { 725 vlan_value |= vlan_dev_get_egress_prio(vlan_out_dev, pr->flow_qos_tag); 726 dev_put(vlan_out_dev); 727 vlan_out_dev = 
NULL; 728 } 729 730 /* 731 * Primary or secondary (QinQ) VLAN? 732 */ 733 if (interface_type_counts[ii_type] == 0) { 734 nircm->vlan_primary_rule.egress_vlan_tag = vlan_value; 735 } else { 736 nircm->vlan_secondary_rule.egress_vlan_tag = vlan_value; 737 } 738 nircm->valid_flags |= NSS_IPV6_RULE_CREATE_VLAN_VALID; 739 740 /* 741 * If we have not yet got an ethernet mac then take this one (very unlikely as mac should have been propagated to the slave (outer) device 742 */ 743 if (interface_type_counts[ECM_DB_IFACE_TYPE_ETHERNET] == 0) { 744 memcpy(to_nss_iface_address, vlan_info.address, ETH_ALEN); 745 interface_type_counts[ECM_DB_IFACE_TYPE_ETHERNET]++; 746 DEBUG_TRACE("%p: VLAN use mac: %pM\n", npci, to_nss_iface_address); 747 } 748 DEBUG_TRACE("%p: vlan tag: %x\n", npci, vlan_value); 749#else 750 rule_invalid = true; 751 DEBUG_TRACE("%p: VLAN - unsupported\n", npci); 752#endif 753 break; 754 case ECM_DB_IFACE_TYPE_IPSEC_TUNNEL: 755#ifdef ECM_INTERFACE_IPSEC_ENABLE 756 DEBUG_TRACE("%p: IPSEC\n", npci); 757 if (interface_type_counts[ii_type] != 0) { 758 /* 759 * Can only support one ipsec 760 */ 761 rule_invalid = true; 762 DEBUG_TRACE("%p: IPSEC - additional unsupported\n", npci); 763 break; 764 } 765 nircm->conn_rule.return_interface_num = NSS_C2C_TX_INTERFACE; 766#else 767 rule_invalid = true; 768 DEBUG_TRACE("%p: IPSEC - unsupported\n", npci); 769#endif 770 break; 771 default: 772 DEBUG_TRACE("%p: Ignoring: %d (%s)\n", npci, ii_type, ii_name); 773 } 774 775 /* 776 * Seen an interface of this type 777 */ 778 interface_type_counts[ii_type]++; 779 } 780 if (rule_invalid) { 781 DEBUG_WARN("%p: from/src Rule invalid\n", npci); 782 ecm_db_connection_interfaces_deref(from_ifaces, from_ifaces_first); 783 ecm_db_connection_interfaces_deref(to_ifaces, to_ifaces_first); 784 goto ported_accel_bad_rule; 785 } 786 787 /* 788 * Routed or bridged? 
789 */ 790 if (ecm_db_connection_is_routed_get(feci->ci)) { 791 nircm->rule_flags |= NSS_IPV6_RULE_CREATE_FLAG_ROUTED; 792 } else { 793 nircm->rule_flags |= NSS_IPV6_RULE_CREATE_FLAG_BRIDGE_FLOW; 794 if (is_l2_encap) { 795 nircm->rule_flags |= NSS_IPV6_RULE_CREATE_FLAG_L2_ENCAP; 796 } 797 } 798 799 /* 800 * Set up the flow and return qos tags 801 */ 802 nircm->qos_rule.flow_qos_tag = (uint32_t)pr->flow_qos_tag; 803 nircm->qos_rule.return_qos_tag = (uint32_t)pr->return_qos_tag; 804 nircm->valid_flags |= NSS_IPV6_RULE_CREATE_QOS_VALID; 805 806#ifdef ECM_CLASSIFIER_DSCP_ENABLE 807 /* 808 * DSCP information? 809 */ 810 if (pr->process_actions & ECM_CLASSIFIER_PROCESS_ACTION_DSCP) { 811 nircm->dscp_rule.flow_dscp = pr->flow_dscp; 812 nircm->dscp_rule.return_dscp = pr->return_dscp; 813 nircm->rule_flags |= NSS_IPV6_RULE_CREATE_FLAG_DSCP_MARKING; 814 nircm->valid_flags |= NSS_IPV6_RULE_CREATE_DSCP_MARKING_VALID; 815 } 816#endif 817 protocol = ecm_db_connection_protocol_get(feci->ci); 818 819 /* 820 * Set protocol 821 */ 822 nircm->tuple.protocol = (int32_t)protocol; 823 824 /* 825 * The flow_ip is where the connection established from 826 */ 827 ecm_db_connection_from_address_get(feci->ci, src_ip); 828 ECM_IP_ADDR_TO_NSS_IPV6_ADDR(nircm->tuple.flow_ip, src_ip); 829 830 /* 831 * The dest_ip is where the connection is established to 832 */ 833 ecm_db_connection_to_address_get(feci->ci, dest_ip); 834 ECM_IP_ADDR_TO_NSS_IPV6_ADDR(nircm->tuple.return_ip, dest_ip); 835 836 /* 837 * Same approach as above for port information 838 */ 839 nircm->tuple.flow_ident = ecm_db_connection_from_port_get(feci->ci); 840 nircm->tuple.return_ident = ecm_db_connection_to_port_get(feci->ci); 841 842 /* 843 * Get mac addresses. 844 * The src_mac is the mac address of the node that established the connection. 845 * This will work whether the from_node is LAN (egress) or WAN (ingress). 
846 */ 847 ecm_db_connection_from_node_address_get(feci->ci, (uint8_t *)nircm->conn_rule.flow_mac); 848 849 /* 850 * The dest_mac is more complex. For egress it is the node address of the 'to' side of the connection. 851 * For ingress it is the node adress of the NAT'ed 'to' IP. 852 * Essentially it is the MAC of node associated with create.dest_ip and this is "to nat" side. 853 */ 854 ecm_db_connection_to_node_address_get(feci->ci, (uint8_t *)nircm->conn_rule.return_mac); 855 856 /* 857 * Get MTU information 858 */ 859 nircm->conn_rule.flow_mtu = (uint32_t)ecm_db_connection_from_iface_mtu_get(feci->ci); 860 nircm->conn_rule.return_mtu = (uint32_t)ecm_db_connection_to_iface_mtu_get(feci->ci); 861 862 if (protocol == IPPROTO_TCP) { 863 /* 864 * Need window scaling information from conntrack if available 865 * Start by looking up the conntrack connection 866 */ 867 if (!ct) { 868 /* 869 * No conntrack so no need to check window sequence space 870 */ 871 DEBUG_TRACE("%p: TCP Accel no ct from conn %p to get window data\n", npci, feci->ci); 872 nircm->rule_flags |= NSS_IPV6_RULE_CREATE_FLAG_NO_SEQ_CHECK; 873 } else { 874 spin_lock_bh(&ct->lock); 875 DEBUG_TRACE("%p: TCP Accel Get window data from ct %p for conn %p\n", npci, ct, feci->ci); 876 877 nircm->tcp_rule.flow_window_scale = ct->proto.tcp.seen[0].td_scale; 878 nircm->tcp_rule.flow_max_window = ct->proto.tcp.seen[0].td_maxwin; 879 nircm->tcp_rule.flow_end = ct->proto.tcp.seen[0].td_end; 880 nircm->tcp_rule.flow_max_end = ct->proto.tcp.seen[0].td_maxend; 881 nircm->tcp_rule.return_window_scale = ct->proto.tcp.seen[1].td_scale; 882 nircm->tcp_rule.return_max_window = ct->proto.tcp.seen[1].td_maxwin; 883 nircm->tcp_rule.return_end = ct->proto.tcp.seen[1].td_end; 884 nircm->tcp_rule.return_max_end = ct->proto.tcp.seen[1].td_maxend; 885#ifdef ECM_OPENWRT_SUPPORT 886 if (nf_ct_tcp_be_liberal || nf_ct_tcp_no_window_check 887#else 888 if (nf_ct_tcp_be_liberal 889#endif 890 || (ct->proto.tcp.seen[0].flags & 
IP_CT_TCP_FLAG_BE_LIBERAL) 891 || (ct->proto.tcp.seen[1].flags & IP_CT_TCP_FLAG_BE_LIBERAL)) { 892 nircm->rule_flags |= NSS_IPV6_RULE_CREATE_FLAG_NO_SEQ_CHECK; 893 } 894 spin_unlock_bh(&ct->lock); 895 } 896 897 nircm->valid_flags |= NSS_IPV6_RULE_CREATE_TCP_VALID; 898 } 899 900 /* 901 * Sync our creation command from the assigned classifiers to get specific additional creation rules. 902 * NOTE: These are called in ascending order of priority and so the last classifier (highest) shall 903 * override any preceding classifiers. 904 * This also gives the classifiers a chance to see that acceleration is being attempted. 905 */ 906 assignment_count = ecm_db_connection_classifier_assignments_get_and_ref(feci->ci, assignments); 907 for (aci_index = 0; aci_index < assignment_count; ++aci_index) { 908 struct ecm_classifier_instance *aci; 909 struct ecm_classifier_rule_create ecrc; 910 /* 911 * NOTE: The current classifiers do not sync anything to the underlying accel engines. 912 * In the future, if any of the classifiers wants to pass any parameter, these parameters 913 * should be received via this object and copied to the accel engine's create object (nircm). 
914 */ 915 aci = assignments[aci_index]; 916 DEBUG_TRACE("%p: sync from: %p, type: %d\n", npci, aci, aci->type_get(aci)); 917 aci->sync_from_v6(aci, &ecrc); 918 } 919 ecm_db_connection_assignments_release(assignment_count, assignments); 920 921 /* 922 * Release the interface lists 923 */ 924 ecm_db_connection_interfaces_deref(from_ifaces, from_ifaces_first); 925 ecm_db_connection_interfaces_deref(to_ifaces, to_ifaces_first); 926 927 DEBUG_INFO("%p: Ported Accelerate connection %p\n" 928 "Protocol: %d\n" 929 "from_mtu: %u\n" 930 "to_mtu: %u\n" 931 "from_ip: " ECM_IP_ADDR_OCTAL_FMT ":%d\n" 932 "to_ip: " ECM_IP_ADDR_OCTAL_FMT ":%d\n" 933 "from_mac: %pM\n" 934 "to_mac: %pM\n" 935 "src_iface_num: %u\n" 936 "dest_iface_num: %u\n" 937 "ingress_inner_vlan_tag: %u\n" 938 "egress_inner_vlan_tag: %u\n" 939 "ingress_outer_vlan_tag: %u\n" 940 "egress_outer_vlan_tag: %u\n" 941 "rule_flags: %x\n" 942 "valid_flags: %x\n" 943 "return_pppoe_session_id: %u\n" 944 "return_pppoe_remote_mac: %pM\n" 945 "flow_pppoe_session_id: %u\n" 946 "flow_pppoe_remote_mac: %pM\n" 947 "flow_qos_tag: %x (%u)\n" 948 "return_qos_tag: %x (%u)\n" 949 "flow_dscp: %x\n" 950 "return_dscp: %x\n", 951 npci, 952 feci->ci, 953 nircm->tuple.protocol, 954 nircm->conn_rule.flow_mtu, 955 nircm->conn_rule.return_mtu, 956 ECM_IP_ADDR_TO_OCTAL(src_ip), nircm->tuple.flow_ident, 957 ECM_IP_ADDR_TO_OCTAL(dest_ip), nircm->tuple.return_ident, 958 nircm->conn_rule.flow_mac, 959 nircm->conn_rule.return_mac, 960 nircm->conn_rule.flow_interface_num, 961 nircm->conn_rule.return_interface_num, 962 nircm->vlan_primary_rule.ingress_vlan_tag, 963 nircm->vlan_primary_rule.egress_vlan_tag, 964 nircm->vlan_secondary_rule.ingress_vlan_tag, 965 nircm->vlan_secondary_rule.egress_vlan_tag, 966 nircm->rule_flags, 967 nircm->valid_flags, 968 nircm->pppoe_rule.return_pppoe_session_id, 969 nircm->pppoe_rule.return_pppoe_remote_mac, 970 nircm->pppoe_rule.flow_pppoe_session_id, 971 nircm->pppoe_rule.flow_pppoe_remote_mac, 972 
nircm->qos_rule.flow_qos_tag, nircm->qos_rule.flow_qos_tag, 973 nircm->qos_rule.return_qos_tag, nircm->qos_rule.return_qos_tag, 974 nircm->dscp_rule.flow_dscp, 975 nircm->dscp_rule.return_dscp); 976 977 if (protocol == IPPROTO_TCP) { 978 DEBUG_INFO("flow_window_scale: %u\n" 979 "flow_max_window: %u\n" 980 "flow_end: %u\n" 981 "flow_max_end: %u\n" 982 "return_window_scale: %u\n" 983 "return_max_window: %u\n" 984 "return_end: %u\n" 985 "return_max_end: %u\n", 986 nircm->tcp_rule.flow_window_scale, 987 nircm->tcp_rule.flow_max_window, 988 nircm->tcp_rule.flow_end, 989 nircm->tcp_rule.flow_max_end, 990 nircm->tcp_rule.return_window_scale, 991 nircm->tcp_rule.return_max_window, 992 nircm->tcp_rule.return_end, 993 nircm->tcp_rule.return_max_end); 994 } 995 996 /* 997 * Now that the rule has been constructed we re-compare the generation occurrance counter. 998 * If there has been a change then we abort because the rule may have been created using 999 * unstable data - especially if another thread has begun regeneration of the connection state. 1000 * NOTE: This does not prevent a regen from being flagged immediately after this line of code either, 1001 * or while the acceleration rule is in flight to the nss. 1002 * This is only to check for consistency of rule state - not that the state is stale. 1003 * Remember that the connection is marked as "accel pending state" so if a regen is flagged immediately 1004 * after this check passes, the connection will be decelerated and refreshed very quickly. 
1005 */ 1006 if (regen_occurrances != ecm_db_connection_regeneration_occurrances_get(feci->ci)) { 1007 DEBUG_INFO("%p: connection:%p regen occurred - aborting accel rule.\n", feci, feci->ci); 1008 ecm_nss_ipv6_accel_pending_clear(feci, ECM_FRONT_END_ACCELERATION_MODE_DECEL); 1009 return; 1010 } 1011 1012 /* 1013 * Ref the connection before issuing an NSS rule 1014 * This ensures that when the NSS responds to the command - which may even be immediately - 1015 * the callback function can trust the correct ref was taken for its purpose. 1016 * NOTE: remember that this will also implicitly hold the feci. 1017 */ 1018 ecm_db_connection_ref(feci->ci); 1019 1020 /* 1021 * We are about to issue the command, record the time of transmission 1022 */ 1023 spin_lock_bh(&feci->lock); 1024 feci->stats.cmd_time_begun = jiffies; 1025 spin_unlock_bh(&feci->lock); 1026 1027 /* 1028 * Call the rule create function 1029 */ 1030 nss_tx_status = nss_ipv6_tx(ecm_nss_ipv6_nss_ipv6_mgr, &nim); 1031 if (nss_tx_status == NSS_TX_SUCCESS) { 1032 /* 1033 * Reset the driver_fail count - transmission was okay here. 1034 */ 1035 spin_lock_bh(&feci->lock); 1036 feci->stats.driver_fail = 0; 1037 spin_unlock_bh(&feci->lock); 1038 return; 1039 } 1040 1041 /* 1042 * Release that ref! 
1043 */ 1044 ecm_db_connection_deref(feci->ci); 1045 1046 /* 1047 * TX failed 1048 */ 1049 spin_lock_bh(&feci->lock); 1050 DEBUG_ASSERT(feci->accel_mode == ECM_FRONT_END_ACCELERATION_MODE_ACCEL_PENDING, "%p: Accel mode unexpected: %d\n", npci, feci->accel_mode); 1051 feci->stats.driver_fail_total++; 1052 feci->stats.driver_fail++; 1053 if (feci->stats.driver_fail >= feci->stats.driver_fail_limit) { 1054 DEBUG_WARN("%p: Accel failed - driver fail limit\n", npci); 1055 result_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DRIVER; 1056 } else { 1057 result_mode = ECM_FRONT_END_ACCELERATION_MODE_DECEL; 1058 } 1059 1060 spin_lock_bh(&ecm_nss_ipv6_lock); 1061 _ecm_nss_ipv6_accel_pending_clear(feci, result_mode); 1062 spin_unlock_bh(&ecm_nss_ipv6_lock); 1063 1064 spin_unlock_bh(&feci->lock); 1065 return; 1066 1067ported_accel_bad_rule: 1068 ; 1069 1070 /* 1071 * Jump to here when rule data is bad and an offload command cannot be constructed 1072 */ 1073 DEBUG_WARN("%p: Accel failed - bad rule\n", npci); 1074 ecm_nss_ipv6_accel_pending_clear(feci, ECM_FRONT_END_ACCELERATION_MODE_FAIL_RULE); 1075} 1076 1077/* 1078 * ecm_nss_ported_ipv6_connection_destroy_callback() 1079 * Callback for handling destroy ack/nack calls. 1080 */ 1081static void ecm_nss_ported_ipv6_connection_destroy_callback(void *app_data, struct nss_ipv6_msg *nim) 1082{ 1083 struct nss_ipv6_rule_destroy_msg *nirdm = &nim->msg.rule_destroy; 1084 uint32_t serial = (uint32_t)app_data; 1085 struct ecm_db_connection_instance *ci; 1086 struct ecm_front_end_connection_instance *feci; 1087 struct ecm_nss_ported_ipv6_connection_instance *npci; 1088 ip_addr_t flow_ip; 1089 ip_addr_t return_ip; 1090 1091 /* 1092 * Is this a response to a destroy message? 1093 */ 1094 if (nim->cm.type != NSS_IPV6_TX_DESTROY_RULE_MSG) { 1095 DEBUG_ERROR("%p: ported destroy callback with improper type: %d\n", nim, nim->cm.type); 1096 return; 1097 } 1098 1099 /* 1100 * Look up ecm connection so that we can update the status. 
1101 */ 1102 ci = ecm_db_connection_serial_find_and_ref(serial); 1103 if (!ci) { 1104 DEBUG_TRACE("%p: destroy callback, connection not found, serial: %u\n", nim, serial); 1105 return; 1106 } 1107 1108 /* 1109 * Release ref held for this ack/nack response. 1110 * NOTE: It's okay to do this here, ci won't go away, because the ci is held as 1111 * a result of the ecm_db_connection_serial_find_and_ref() 1112 */ 1113 ecm_db_connection_deref(ci); 1114 1115 /* 1116 * Get the front end instance 1117 */ 1118 feci = ecm_db_connection_front_end_get_and_ref(ci); 1119 npci = (struct ecm_nss_ported_ipv6_connection_instance *)feci; 1120 DEBUG_CHECK_MAGIC(npci, ECM_NSS_PORTED_IPV6_CONNECTION_INSTANCE_MAGIC, "%p: magic failed", npci); 1121 1122 ECM_NSS_IPV6_ADDR_TO_IP_ADDR(flow_ip, nirdm->tuple.flow_ip); 1123 ECM_NSS_IPV6_ADDR_TO_IP_ADDR(return_ip, nirdm->tuple.return_ip); 1124 1125 /* 1126 * Record command duration 1127 */ 1128 ecm_nss_ipv6_decel_done_time_update(feci); 1129 1130 /* 1131 * Dump some useful trace information. 1132 */ 1133 DEBUG_TRACE("%p: decelerate response for connection: %p\n", npci, feci->ci); 1134 DEBUG_TRACE("%p: flow_ip: " ECM_IP_ADDR_OCTAL_FMT ":%d\n", npci, ECM_IP_ADDR_TO_OCTAL(flow_ip), nirdm->tuple.flow_ident); 1135 DEBUG_TRACE("%p: return_ip: " ECM_IP_ADDR_OCTAL_FMT ":%d\n", npci, ECM_IP_ADDR_TO_OCTAL(return_ip), nirdm->tuple.return_ident); 1136 DEBUG_TRACE("%p: protocol: %d\n", npci, nirdm->tuple.protocol); 1137 1138 /* 1139 * Drop decel pending counter 1140 */ 1141 spin_lock_bh(&ecm_nss_ipv6_lock); 1142 ecm_nss_ipv6_pending_decel_count--; 1143 DEBUG_ASSERT(ecm_nss_ipv6_pending_decel_count >= 0, "Bad decel pending counter\n"); 1144 spin_unlock_bh(&ecm_nss_ipv6_lock); 1145 1146 spin_lock_bh(&feci->lock); 1147 1148 /* 1149 * If decel is not still pending then it's possible that the NSS ended acceleration by some other reason e.g. flush 1150 * In which case we cannot rely on the response we get here. 
1151 */ 1152 if (feci->accel_mode != ECM_FRONT_END_ACCELERATION_MODE_DECEL_PENDING) { 1153 spin_unlock_bh(&feci->lock); 1154 1155 /* 1156 * Release the connections. 1157 */ 1158 feci->deref(feci); 1159 ecm_db_connection_deref(ci); 1160 return; 1161 } 1162 1163 DEBUG_TRACE("%p: response: %d\n", npci, nim->cm.response); 1164 if (nim->cm.response != NSS_CMN_RESPONSE_ACK) { 1165 feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DECEL; 1166 } else { 1167 feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_DECEL; 1168 } 1169 1170 /* 1171 * If connection became defunct then set mode so that no further accel/decel attempts occur. 1172 */ 1173 if (feci->is_defunct) { 1174 feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DEFUNCT; 1175 } 1176 spin_unlock_bh(&feci->lock); 1177 1178 /* 1179 * Ported acceleration ends 1180 */ 1181 spin_lock_bh(&ecm_nss_ipv6_lock); 1182 ecm_nss_ported_ipv6_accelerated_count[npci->ported_accelerated_count_index]--; /* Protocol specific counter */ 1183 DEBUG_ASSERT(ecm_nss_ported_ipv6_accelerated_count[npci->ported_accelerated_count_index] >= 0, "Bad udp accel counter\n"); 1184 ecm_nss_ipv6_accelerated_count--; /* General running counter */ 1185 DEBUG_ASSERT(ecm_nss_ipv6_accelerated_count >= 0, "Bad accel counter\n"); 1186 spin_unlock_bh(&ecm_nss_ipv6_lock); 1187 1188 /* 1189 * Release the connections. 
1190 */ 1191 feci->deref(feci); 1192 ecm_db_connection_deref(ci); 1193} 1194 1195/* 1196 * ecm_nss_ported_ipv6_connection_decelerate() 1197 * Decelerate a connection 1198 */ 1199static void ecm_nss_ported_ipv6_connection_decelerate(struct ecm_front_end_connection_instance *feci) 1200{ 1201 struct ecm_nss_ported_ipv6_connection_instance *npci = (struct ecm_nss_ported_ipv6_connection_instance *)feci; 1202 struct nss_ipv6_msg nim; 1203 struct nss_ipv6_rule_destroy_msg *nirdm; 1204 ip_addr_t src_ip; 1205 ip_addr_t dest_ip; 1206 nss_tx_status_t nss_tx_status; 1207 1208 DEBUG_CHECK_MAGIC(npci, ECM_NSS_PORTED_IPV6_CONNECTION_INSTANCE_MAGIC, "%p: magic failed", npci); 1209 1210 /* 1211 * If decelerate is in error or already pending then ignore 1212 */ 1213 spin_lock_bh(&feci->lock); 1214 if (feci->stats.decelerate_pending) { 1215 spin_unlock_bh(&feci->lock); 1216 return; 1217 } 1218 1219 /* 1220 * If acceleration is pending then we cannot decelerate right now or we will race with it 1221 * Set a decelerate pending flag that will be actioned when the acceleration command is complete. 1222 */ 1223 if (feci->accel_mode == ECM_FRONT_END_ACCELERATION_MODE_ACCEL_PENDING) { 1224 feci->stats.decelerate_pending = true; 1225 spin_unlock_bh(&feci->lock); 1226 return; 1227 } 1228 1229 /* 1230 * Can only decelerate if accelerated 1231 * NOTE: This will also deny accel when the connection is in fail condition too. 
1232 */ 1233 if (feci->accel_mode != ECM_FRONT_END_ACCELERATION_MODE_ACCEL) { 1234 spin_unlock_bh(&feci->lock); 1235 return; 1236 } 1237 1238 /* 1239 * Initiate deceleration 1240 */ 1241 feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_DECEL_PENDING; 1242 spin_unlock_bh(&feci->lock); 1243 1244 /* 1245 * Increment the decel pending counter 1246 */ 1247 spin_lock_bh(&ecm_nss_ipv6_lock); 1248 ecm_nss_ipv6_pending_decel_count++; 1249 spin_unlock_bh(&ecm_nss_ipv6_lock); 1250 1251 /* 1252 * Prepare deceleration message 1253 */ 1254 nss_ipv6_msg_init(&nim, NSS_IPV6_RX_INTERFACE, NSS_IPV6_TX_DESTROY_RULE_MSG, 1255 sizeof(struct nss_ipv6_rule_destroy_msg), 1256 ecm_nss_ported_ipv6_connection_destroy_callback, 1257 (void *)ecm_db_connection_serial_get(feci->ci)); 1258 1259 nirdm = &nim.msg.rule_destroy; 1260 nirdm->tuple.protocol = (int32_t)ecm_db_connection_protocol_get(feci->ci);; 1261 1262 /* 1263 * Get addressing information 1264 */ 1265 ecm_db_connection_from_address_get(feci->ci, src_ip); 1266 ECM_IP_ADDR_TO_NSS_IPV6_ADDR(nirdm->tuple.flow_ip, src_ip); 1267 ecm_db_connection_to_address_get(feci->ci, dest_ip); 1268 ECM_IP_ADDR_TO_NSS_IPV6_ADDR(nirdm->tuple.return_ip, dest_ip); 1269 nirdm->tuple.flow_ident = ecm_db_connection_from_port_get(feci->ci); 1270 nirdm->tuple.return_ident = ecm_db_connection_to_port_get(feci->ci); 1271 1272 DEBUG_INFO("%p: Ported Connection %p decelerate\n" 1273 "protocol: %d\n" 1274 "src_ip: " ECM_IP_ADDR_OCTAL_FMT ":%d\n" 1275 "dest_ip: " ECM_IP_ADDR_OCTAL_FMT ":%d\n", 1276 npci, feci->ci, nirdm->tuple.protocol, 1277 ECM_IP_ADDR_TO_OCTAL(src_ip), nirdm->tuple.flow_ident, 1278 ECM_IP_ADDR_TO_OCTAL(dest_ip), nirdm->tuple.return_ident); 1279 1280 /* 1281 * Take a ref to the feci->ci so that it will persist until we get a response from the NSS. 1282 * NOTE: This will implicitly hold the feci too. 
1283 */ 1284 ecm_db_connection_ref(feci->ci); 1285 1286 /* 1287 * We are about to issue the command, record the time of transmission 1288 */ 1289 spin_lock_bh(&feci->lock); 1290 feci->stats.cmd_time_begun = jiffies; 1291 spin_unlock_bh(&feci->lock); 1292 1293 /* 1294 * Destroy the NSS connection cache entry. 1295 */ 1296 nss_tx_status = nss_ipv6_tx(ecm_nss_ipv6_nss_ipv6_mgr, &nim); 1297 if (nss_tx_status == NSS_TX_SUCCESS) { 1298 /* 1299 * Reset the driver_fail count - transmission was okay here. 1300 */ 1301 spin_lock_bh(&feci->lock); 1302 feci->stats.driver_fail = 0; 1303 spin_unlock_bh(&feci->lock); 1304 return; 1305 } 1306 1307 /* 1308 * Release the ref take, NSS driver did not accept our command. 1309 */ 1310 ecm_db_connection_deref(feci->ci); 1311 1312 /* 1313 * TX failed 1314 */ 1315 spin_lock_bh(&feci->lock); 1316 feci->stats.driver_fail_total++; 1317 feci->stats.driver_fail++; 1318 if (feci->stats.driver_fail >= feci->stats.driver_fail_limit) { 1319 DEBUG_WARN("%p: Decel failed - driver fail limit\n", npci); 1320 feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DRIVER; 1321 } 1322 spin_unlock_bh(&feci->lock); 1323 1324 /* 1325 * Could not send the request, decrement the decel pending counter 1326 */ 1327 spin_lock_bh(&ecm_nss_ipv6_lock); 1328 ecm_nss_ipv6_pending_decel_count--; 1329 DEBUG_ASSERT(ecm_nss_ipv6_pending_decel_count >= 0, "Bad decel pending counter\n"); 1330 spin_unlock_bh(&ecm_nss_ipv6_lock); 1331} 1332 1333/* 1334 * ecm_nss_ported_ipv6_connection_defunct_callback() 1335 * Callback to be called when a ported connection has become defunct. 
1336 */ 1337static void ecm_nss_ported_ipv6_connection_defunct_callback(void *arg) 1338{ 1339 struct ecm_front_end_connection_instance *feci = (struct ecm_front_end_connection_instance *)arg; 1340 struct ecm_nss_ported_ipv6_connection_instance *npci = (struct ecm_nss_ported_ipv6_connection_instance *)feci; 1341 1342 DEBUG_CHECK_MAGIC(npci, ECM_NSS_PORTED_IPV6_CONNECTION_INSTANCE_MAGIC, "%p: magic failed", npci); 1343 1344 spin_lock_bh(&feci->lock); 1345 1346 /* 1347 * If connection has already become defunct, do nothing. 1348 */ 1349 if (feci->is_defunct) { 1350 spin_unlock_bh(&feci->lock); 1351 return; 1352 } 1353 feci->is_defunct = true; 1354 1355 /* 1356 * If the connection is already in one of the fail modes, do nothing, keep the current accel_mode. 1357 */ 1358 if (ECM_FRONT_END_ACCELERATION_FAILED(feci->accel_mode)) { 1359 spin_unlock_bh(&feci->lock); 1360 return; 1361 } 1362 1363 /* 1364 * If the connection is decel then ensure it will not attempt accel while defunct. 1365 */ 1366 if (feci->accel_mode == ECM_FRONT_END_ACCELERATION_MODE_DECEL) { 1367 feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DEFUNCT; 1368 spin_unlock_bh(&feci->lock); 1369 return; 1370 } 1371 1372 /* 1373 * If the connection is decel pending then decel operation is in progress anyway. 1374 */ 1375 if (feci->accel_mode == ECM_FRONT_END_ACCELERATION_MODE_DECEL_PENDING) { 1376 spin_unlock_bh(&feci->lock); 1377 return; 1378 } 1379 1380 /* 1381 * If none of the cases matched above, this means the connection is in one of the 1382 * accel modes (accel or accel_pending) so we force a deceleration. 1383 * NOTE: If the mode is accel pending then the decel will be actioned when that is completed. 
1384 */ 1385 spin_unlock_bh(&feci->lock); 1386 ecm_nss_ported_ipv6_connection_decelerate(feci); 1387} 1388 1389/* 1390 * ecm_nss_ported_ipv6_connection_accel_state_get() 1391 * Get acceleration state 1392 */ 1393static ecm_front_end_acceleration_mode_t ecm_nss_ported_ipv6_connection_accel_state_get(struct ecm_front_end_connection_instance *feci) 1394{ 1395 struct ecm_nss_ported_ipv6_connection_instance *npci = (struct ecm_nss_ported_ipv6_connection_instance *)feci; 1396 ecm_front_end_acceleration_mode_t state; 1397 1398 DEBUG_CHECK_MAGIC(npci, ECM_NSS_PORTED_IPV6_CONNECTION_INSTANCE_MAGIC, "%p: magic failed", npci); 1399 spin_lock_bh(&feci->lock); 1400 state = feci->accel_mode; 1401 spin_unlock_bh(&feci->lock); 1402 return state; 1403} 1404 1405/* 1406 * ecm_nss_ported_ipv6_connection_action_seen() 1407 * Acceleration action / activity has been seen for this connection. 1408 * 1409 * NOTE: Call the action_seen() method when the NSS has demonstrated that it has offloaded some data for a connection. 1410 */ 1411static void ecm_nss_ported_ipv6_connection_action_seen(struct ecm_front_end_connection_instance *feci) 1412{ 1413 struct ecm_nss_ported_ipv6_connection_instance *npci = (struct ecm_nss_ported_ipv6_connection_instance *)feci; 1414 1415 DEBUG_CHECK_MAGIC(npci, ECM_NSS_PORTED_IPV6_CONNECTION_INSTANCE_MAGIC, "%p: magic failed", npci); 1416 DEBUG_INFO("%p: Action seen\n", npci); 1417 spin_lock_bh(&feci->lock); 1418 feci->stats.no_action_seen = 0; 1419 spin_unlock_bh(&feci->lock); 1420} 1421 1422/* 1423 * ecm_nss_ported_ipv6_connection_accel_ceased() 1424 * NSS has indicated that acceleration has stopped. 1425 * 1426 * NOTE: This is called in response to an NSS self-initiated termination of acceleration. 1427 * This must NOT be called because the ECM terminated the acceleration. 
 */
static void ecm_nss_ported_ipv6_connection_accel_ceased(struct ecm_front_end_connection_instance *feci)
{
	struct ecm_nss_ported_ipv6_connection_instance *npci = (struct ecm_nss_ported_ipv6_connection_instance *)feci;

	DEBUG_CHECK_MAGIC(npci, ECM_NSS_PORTED_IPV6_CONNECTION_INSTANCE_MAGIC, "%p: magic failed", npci);
	DEBUG_INFO("%p: accel ceased\n", npci);

	spin_lock_bh(&feci->lock);

	/*
	 * If we are in accel-pending state then the NSS has issued a flush out-of-order
	 * with the ACK/NACK we are actually waiting for.
	 * To work around this we record a "flush has already happened" and will action it when we finally get that ACK/NACK.
	 * GGG TODO This should eventually be removed when the NSS honours messaging sequence.
	 */
	if (feci->accel_mode == ECM_FRONT_END_ACCELERATION_MODE_ACCEL_PENDING) {
		feci->stats.flush_happened = true;
		feci->stats.flush_happened_total++;
		spin_unlock_bh(&feci->lock);
		return;
	}

	/*
	 * If connection is no longer accelerated by the time we get here just ignore the command
	 */
	if (feci->accel_mode != ECM_FRONT_END_ACCELERATION_MODE_ACCEL) {
		spin_unlock_bh(&feci->lock);
		return;
	}

	/*
	 * If the no_action_seen counter was not reset then acceleration ended without any offload action
	 */
	if (feci->stats.no_action_seen) {
		feci->stats.no_action_seen_total++;
	}

	/*
	 * If the no_action_seen indicates successive cessations of acceleration without any offload action occuring
	 * then we fail out this connection
	 */
	if (feci->stats.no_action_seen >= feci->stats.no_action_seen_limit) {
		feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_NO_ACTION;
	} else {
		feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_DECEL;
	}
	spin_unlock_bh(&feci->lock);

	/*
	 * Ported acceleration ends - drop the protocol-specific and global accel counters.
	 */
	spin_lock_bh(&ecm_nss_ipv6_lock);
	ecm_nss_ported_ipv6_accelerated_count[npci->ported_accelerated_count_index]--;	/* Protocol specific counter */
	DEBUG_ASSERT(ecm_nss_ported_ipv6_accelerated_count[npci->ported_accelerated_count_index] >= 0, "Bad ported accel counter\n");
	ecm_nss_ipv6_accelerated_count--;	/* General running counter */
	DEBUG_ASSERT(ecm_nss_ipv6_accelerated_count >= 0, "Bad accel counter\n");
	spin_unlock_bh(&ecm_nss_ipv6_lock);
}

/*
 * ecm_nss_ported_ipv6_connection_ref()
 *	Ref a connection front end instance
 */
static void ecm_nss_ported_ipv6_connection_ref(struct ecm_front_end_connection_instance *feci)
{
	struct ecm_nss_ported_ipv6_connection_instance *npci = (struct ecm_nss_ported_ipv6_connection_instance *)feci;

	DEBUG_CHECK_MAGIC(npci, ECM_NSS_PORTED_IPV6_CONNECTION_INSTANCE_MAGIC, "%p: magic failed", npci);
	spin_lock_bh(&feci->lock);
	feci->refs++;
	DEBUG_TRACE("%p: npci ref %d\n", npci, feci->refs);
	DEBUG_ASSERT(feci->refs > 0, "%p: ref wrap\n", npci);
	spin_unlock_bh(&feci->lock);
}

/*
 * ecm_nss_ported_ipv6_connection_deref()
 *	Deref a connection front end instance
 *
 * Returns the remaining ref count; when it reaches zero the instance is freed here.
 */
static int ecm_nss_ported_ipv6_connection_deref(struct ecm_front_end_connection_instance *feci)
{
	struct ecm_nss_ported_ipv6_connection_instance *npci = (struct ecm_nss_ported_ipv6_connection_instance *)feci;

	DEBUG_CHECK_MAGIC(npci, ECM_NSS_PORTED_IPV6_CONNECTION_INSTANCE_MAGIC, "%p: magic failed", npci);

	spin_lock_bh(&feci->lock);
	feci->refs--;
	DEBUG_ASSERT(feci->refs >= 0, "%p: ref wrap\n", npci);

	if (feci->refs > 0) {
		int refs = feci->refs;
		spin_unlock_bh(&feci->lock);
		DEBUG_TRACE("%p: npci deref %d\n", npci, refs);
		return refs;
	}
	spin_unlock_bh(&feci->lock);

	/*
	 * We can now destroy the instance
	 */
	DEBUG_TRACE("%p: npci final\n", npci);
	DEBUG_CLEAR_MAGIC(npci);
	kfree(npci);

	return 0;
}

#ifdef ECM_STATE_OUTPUT_ENABLE
/*
 * ecm_nss_ported_ipv6_connection_state_get()
 *	Return the state of this ported front end instance
 *
 * Snapshots the mode/stats under the lock, then emits them via the state file API.
 * NOTE(review): early error returns leave the "front_end_v6.ported" prefix pushed -
 * presumably the state framework unwinds this on error; confirm against callers.
 */
static int ecm_nss_ported_ipv6_connection_state_get(struct ecm_front_end_connection_instance *feci, struct ecm_state_file_instance *sfi)
{
	int result;
	bool can_accel;
	ecm_front_end_acceleration_mode_t accel_mode;
	struct ecm_front_end_connection_mode_stats stats;
	struct ecm_nss_ported_ipv6_connection_instance *npci = (struct ecm_nss_ported_ipv6_connection_instance *)feci;

	DEBUG_CHECK_MAGIC(npci, ECM_NSS_PORTED_IPV6_CONNECTION_INSTANCE_MAGIC, "%p: magic failed", npci);

	spin_lock_bh(&feci->lock);
	can_accel = feci->can_accel;
	accel_mode = feci->accel_mode;
	memcpy(&stats, &feci->stats, sizeof(struct ecm_front_end_connection_mode_stats));
	spin_unlock_bh(&feci->lock);

	if ((result = ecm_state_prefix_add(sfi, "front_end_v6.ported"))) {
		return result;
	}

	if ((result = ecm_state_write(sfi, "can_accel", "%d", can_accel))) {
		return result;
	}
	if ((result = ecm_state_write(sfi, "accel_mode", "%d", accel_mode))) {
		return result;
	}
	if ((result = ecm_state_write(sfi, "decelerate_pending", "%d", stats.decelerate_pending))) {
		return result;
	}
	if ((result = ecm_state_write(sfi, "flush_happened_total", "%d", stats.flush_happened_total))) {
		return result;
	}
	if ((result = ecm_state_write(sfi, "no_action_seen_total", "%d", stats.no_action_seen_total))) {
		return result;
	}
	if ((result = ecm_state_write(sfi, "no_action_seen", "%d", stats.no_action_seen))) {
		return result;
	}
	if ((result = ecm_state_write(sfi, "no_action_seen_limit", "%d", stats.no_action_seen_limit))) {
		return result;
	}
	if ((result = ecm_state_write(sfi, "driver_fail_total", "%d", stats.driver_fail_total))) {
		return result;
	}
	if ((result = ecm_state_write(sfi, "driver_fail", "%d", stats.driver_fail))) {
		return result;
	}
	if ((result = ecm_state_write(sfi, "driver_fail_limit", "%d", stats.driver_fail_limit))) {
		return result;
	}
	if ((result = ecm_state_write(sfi, "ae_nack_total", "%d", stats.ae_nack_total))) {
		return result;
	}
	if ((result = ecm_state_write(sfi, "ae_nack", "%d", stats.ae_nack))) {
		return result;
	}
	if ((result = ecm_state_write(sfi, "ae_nack_limit", "%d", stats.ae_nack_limit))) {
		return result;
	}

	return ecm_state_prefix_remove(sfi);
}
#endif

/*
 * ecm_nss_ported_ipv6_connection_instance_alloc()
 *	Create a front end instance specific for ported connection
 *
 * Allocates atomically (may be called from softirq context); returns NULL on
 * allocation failure or unsupported protocol (only TCP and UDP are ported).
 */
static struct ecm_nss_ported_ipv6_connection_instance *ecm_nss_ported_ipv6_connection_instance_alloc(
								struct ecm_db_connection_instance *ci,
								int protocol,
								bool can_accel)
{
	struct ecm_nss_ported_ipv6_connection_instance *npci;
	struct ecm_front_end_connection_instance *feci;

	npci = (struct ecm_nss_ported_ipv6_connection_instance *)kzalloc(sizeof(struct ecm_nss_ported_ipv6_connection_instance), GFP_ATOMIC | __GFP_NOWARN);
	if (!npci) {
		DEBUG_WARN("Ported Front end alloc failed\n");
		return NULL;
	}

	/*
	 * Refs is 1 for the creator of the connection
	 */
	feci = (struct ecm_front_end_connection_instance *)npci;
	feci->refs = 1;
	DEBUG_SET_MAGIC(npci, ECM_NSS_PORTED_IPV6_CONNECTION_INSTANCE_MAGIC);
	spin_lock_init(&feci->lock);

	feci->can_accel = can_accel;
	feci->accel_mode = (can_accel) ?
ECM_FRONT_END_ACCELERATION_MODE_DECEL : ECM_FRONT_END_ACCELERATION_MODE_FAIL_DENIED; 1633 spin_lock_bh(&ecm_nss_ipv6_lock); 1634 feci->stats.no_action_seen_limit = ecm_nss_ipv6_no_action_limit_default; 1635 feci->stats.driver_fail_limit = ecm_nss_ipv6_driver_fail_limit_default; 1636 feci->stats.ae_nack_limit = ecm_nss_ipv6_nack_limit_default; 1637 spin_unlock_bh(&ecm_nss_ipv6_lock); 1638 1639 /* 1640 * Copy reference to connection - no need to ref ci as ci maintains a ref to this instance instead (this instance persists for as long as ci does) 1641 */ 1642 feci->ci = ci; 1643 1644 /* 1645 * Populate the methods and callbacks 1646 */ 1647 feci->ref = ecm_nss_ported_ipv6_connection_ref; 1648 feci->deref = ecm_nss_ported_ipv6_connection_deref; 1649 feci->decelerate = ecm_nss_ported_ipv6_connection_decelerate; 1650 feci->accel_state_get = ecm_nss_ported_ipv6_connection_accel_state_get; 1651 feci->action_seen = ecm_nss_ported_ipv6_connection_action_seen; 1652 feci->accel_ceased = ecm_nss_ported_ipv6_connection_accel_ceased; 1653#ifdef ECM_STATE_OUTPUT_ENABLE 1654 feci->state_get = ecm_nss_ported_ipv6_connection_state_get; 1655#endif 1656 feci->ae_interface_number_by_dev_get = ecm_nss_common_get_interface_number_by_dev; 1657 1658 if (protocol == IPPROTO_TCP) { 1659 npci->ported_accelerated_count_index = ECM_NSS_PORTED_IPV6_PROTO_TCP; 1660 } else if (protocol == IPPROTO_UDP) { 1661 npci->ported_accelerated_count_index = ECM_NSS_PORTED_IPV6_PROTO_UDP; 1662 } else { 1663 DEBUG_WARN("%p: Wrong protocol: %d\n", npci, protocol); 1664 DEBUG_CLEAR_MAGIC(npci); 1665 kfree(npci); 1666 return NULL; 1667 } 1668 1669 return npci; 1670} 1671 1672/* 1673 * ecm_nss_ported_ipv6_process() 1674 * Process a ported packet 1675 */ 1676unsigned int ecm_nss_ported_ipv6_process(struct net_device *out_dev, 1677 struct net_device *in_dev, 1678 uint8_t *src_node_addr, 1679 uint8_t *dest_node_addr, 1680 bool can_accel, bool is_routed, bool is_l2_encap, struct sk_buff *skb, 1681 struct 
ecm_tracker_ip_header *iph, 1682 struct nf_conn *ct, ecm_tracker_sender_type_t sender, ecm_db_direction_t ecm_dir, 1683 struct nf_conntrack_tuple *orig_tuple, struct nf_conntrack_tuple *reply_tuple, 1684 ip_addr_t ip_src_addr, ip_addr_t ip_dest_addr) 1685{ 1686 struct tcphdr *tcp_hdr; 1687 struct tcphdr tcp_hdr_buff; 1688 struct udphdr *udp_hdr; 1689 struct udphdr udp_hdr_buff; 1690 int src_port; 1691 int dest_port; 1692 struct ecm_db_connection_instance *ci; 1693 ip_addr_t match_addr; 1694 struct ecm_classifier_instance *assignments[ECM_CLASSIFIER_TYPES]; 1695 int aci_index; 1696 int assignment_count; 1697 ecm_db_timer_group_t ci_orig_timer_group; 1698 struct ecm_classifier_process_response prevalent_pr; 1699 int protocol = (int)orig_tuple->dst.protonum; 1700 1701 if (protocol == IPPROTO_TCP) { 1702 /* 1703 * Extract TCP header to obtain port information 1704 */ 1705 tcp_hdr = ecm_tracker_tcp_check_header_and_read(skb, iph, &tcp_hdr_buff); 1706 if (unlikely(!tcp_hdr)) { 1707 DEBUG_WARN("TCP packet header %p\n", skb); 1708 return NF_ACCEPT; 1709 } 1710 1711 /* 1712 * Now extract information, if we have conntrack then use that (which would already be in the tuples) 1713 */ 1714 if (unlikely(!ct)) { 1715 orig_tuple->src.u.tcp.port = tcp_hdr->source; 1716 orig_tuple->dst.u.tcp.port = tcp_hdr->dest; 1717 reply_tuple->src.u.tcp.port = tcp_hdr->dest; 1718 reply_tuple->dst.u.tcp.port = tcp_hdr->source; 1719 } 1720 1721 /* 1722 * Extract transport port information 1723 * Refer to the ecm_nss_ipv6_process() for information on how we extract this information. 
		 */
		if (sender == ECM_TRACKER_SENDER_TYPE_SRC) {
			/*
			 * Packet seen from the connection originator: tuple src/dst map
			 * directly onto src_port/dest_port (host byte order after ntohs).
			 */
			switch(ecm_dir) {
			case ECM_DB_DIRECTION_NON_NAT:
			case ECM_DB_DIRECTION_BRIDGED:
				src_port = ntohs(orig_tuple->src.u.tcp.port);
				dest_port = ntohs(orig_tuple->dst.u.tcp.port);
				break;
			default:
				DEBUG_ASSERT(false, "Unhandled ecm_dir: %d\n", ecm_dir);
			}
		} else {
			/*
			 * Packet seen from the connection responder: swap the tuple ports
			 * so src_port/dest_port remain in the originator's frame of reference.
			 */
			switch(ecm_dir) {
			case ECM_DB_DIRECTION_NON_NAT:
			case ECM_DB_DIRECTION_BRIDGED:
				dest_port = ntohs(orig_tuple->src.u.tcp.port);
				src_port = ntohs(orig_tuple->dst.u.tcp.port);
				break;
			default:
				DEBUG_ASSERT(false, "Unhandled ecm_dir: %d\n", ecm_dir);
			}
		}

		DEBUG_TRACE("TCP src: " ECM_IP_ADDR_OCTAL_FMT ":%d, dest: " ECM_IP_ADDR_OCTAL_FMT ":%d, dir %d\n",
				ECM_IP_ADDR_TO_OCTAL(ip_src_addr), src_port, ECM_IP_ADDR_TO_OCTAL(ip_dest_addr), dest_port, ecm_dir);
	} else if (protocol == IPPROTO_UDP) {
		/*
		 * Extract UDP header to obtain port information
		 */
		udp_hdr = ecm_tracker_udp_check_header_and_read(skb, iph, &udp_hdr_buff);
		if (unlikely(!udp_hdr)) {
			DEBUG_WARN("Invalid UDP header in skb %p\n", skb);
			return NF_ACCEPT;
		}

		/*
		 * Deny acceleration for L2TP-over-UDP tunnel
		 * NOTE: the packet is still allowed through to the stack; only the
		 * accel capability for this flow is cleared (can_accel = false).
		 */
		if (skb->sk) {
			if(skb->sk->sk_protocol == IPPROTO_UDP) {
				struct udp_sock *usk = udp_sk(skb->sk);
				if (usk) {
					if (unlikely(usk->encap_type == UDP_ENCAP_L2TPINUDP)) {
						DEBUG_TRACE("Skip packets for L2TP tunnel in skb %p\n", skb);
						can_accel = false;
					}
				}
			}
		}

		/*
		 * Now extract information, if we have conntrack then use that (which would already be in the tuples)
		 * Without conntrack we synthesise the tuples from the UDP header; ports
		 * are stored in network byte order, as conntrack itself would keep them.
		 */
		if (unlikely(!ct)) {
			orig_tuple->src.u.udp.port = udp_hdr->source;
			orig_tuple->dst.u.udp.port = udp_hdr->dest;
			reply_tuple->src.u.udp.port = udp_hdr->dest;
			reply_tuple->dst.u.udp.port = udp_hdr->source;
		}

		/*
		 * Extract transport port information
		 * Refer to the ecm_nss_ipv6_process() for information on how we extract this information.
		 */
		if (sender == ECM_TRACKER_SENDER_TYPE_SRC) {
			switch(ecm_dir) {
			case ECM_DB_DIRECTION_NON_NAT:
			case ECM_DB_DIRECTION_BRIDGED:
				src_port = ntohs(orig_tuple->src.u.udp.port);
				dest_port = ntohs(orig_tuple->dst.u.udp.port);
				break;
			default:
				DEBUG_ASSERT(false, "Unhandled ecm_dir: %d\n", ecm_dir);
			}
		} else {
			switch(ecm_dir) {
			case ECM_DB_DIRECTION_NON_NAT:
			case ECM_DB_DIRECTION_BRIDGED:
				dest_port = ntohs(orig_tuple->src.u.udp.port);
				src_port = ntohs(orig_tuple->dst.u.udp.port);
				break;
			default:
				DEBUG_ASSERT(false, "Unhandled ecm_dir: %d\n", ecm_dir);
			}
		}
		DEBUG_TRACE("UDP src: " ECM_IP_ADDR_OCTAL_FMT ":%d, dest: " ECM_IP_ADDR_OCTAL_FMT ":%d, dir %d\n",
				ECM_IP_ADDR_TO_OCTAL(ip_src_addr), src_port, ECM_IP_ADDR_TO_OCTAL(ip_dest_addr), dest_port, ecm_dir);
	} else {
		/*
		 * Only TCP and UDP are handled by this "ported" front end; anything
		 * else is left to the stack untouched.
		 */
		DEBUG_WARN("Wrong protocol: %d\n", protocol);
		return NF_ACCEPT;
	}

	/*
	 * Look up a connection
	 */
	ci = ecm_db_connection_find_and_ref(ip_src_addr, ip_dest_addr, protocol, src_port, dest_port);

	/*
	 * If there is no existing connection then create a new one.
	 */
	if (unlikely(!ci)) {
		struct ecm_db_mapping_instance *src_mi;
		struct ecm_db_mapping_instance *dest_mi;
		struct ecm_db_node_instance *src_ni;
		struct ecm_db_node_instance *dest_ni;
		struct ecm_classifier_default_instance *dci;
		struct ecm_db_connection_instance *nci;
		ecm_classifier_type_t classifier_type;
		struct ecm_front_end_connection_instance *feci;
		int32_t to_list_first;
		struct ecm_db_iface_instance *to_list[ECM_DB_IFACE_HEIRARCHY_MAX];
		int32_t from_list_first;
		struct ecm_db_iface_instance *from_list[ECM_DB_IFACE_HEIRARCHY_MAX];

		DEBUG_INFO("New Ported connection from " ECM_IP_ADDR_OCTAL_FMT ":%u to " ECM_IP_ADDR_OCTAL_FMT ":%u\n",
			ECM_IP_ADDR_TO_OCTAL(ip_src_addr), src_port, ECM_IP_ADDR_TO_OCTAL(ip_dest_addr), dest_port);

		/*
		 * Before we attempt to create the connection are we being terminated?
		 */
		spin_lock_bh(&ecm_nss_ipv6_lock);
		if (ecm_nss_ipv6_terminate_pending) {
			spin_unlock_bh(&ecm_nss_ipv6_lock);
			DEBUG_WARN("Terminating\n");

			/*
			 * As we are terminating we just allow the packet to pass - it's no longer our concern
			 */
			return NF_ACCEPT;
		}
		spin_unlock_bh(&ecm_nss_ipv6_lock);

		/*
		 * Does this connection have a conntrack entry?
		 */
		if (ct) {
			unsigned int conn_count;

			/*
			 * If we have exceeded the connection limit (according to conntrack) then abort
			 * NOTE: Conntrack, when at its limit, will destroy a connection to make way for a new.
			 * Conntrack won't exceed its limit but ECM can due to it needing to hold connections while
			 * acceleration commands are in-flight.
			 * This means that ECM can 'fall behind' somewhat with the connection state wrt conntrack connection state.
			 * This is not seen as an issue since conntrack will have issued us with a destroy event for the flushed connection(s)
			 * and we will eventually catch up.
			 * Since ECM is capable of handling connections mid-flow ECM will pick up where it can.
			 */
			conn_count = (unsigned int)ecm_db_connection_count_get();
			if (conn_count >= nf_conntrack_max) {
				DEBUG_WARN("ECM Connection count limit reached: db: %u, ct: %u\n", conn_count, nf_conntrack_max);
				return NF_ACCEPT;
			}

			if (protocol == IPPROTO_TCP) {
				/*
				 * No point in establishing a connection for one that is closing.
				 * ct->lock guards the conntrack protocol state read.
				 */
				spin_lock_bh(&ct->lock);
				if (ct->proto.tcp.state >= TCP_CONNTRACK_FIN_WAIT && ct->proto.tcp.state <= TCP_CONNTRACK_CLOSE) {
					spin_unlock_bh(&ct->lock);
					DEBUG_TRACE("%p: Connection in termination state %#X\n", ct, ct->proto.tcp.state);
					return NF_ACCEPT;
				}
				spin_unlock_bh(&ct->lock);
			}
		}

		/*
		 * Now allocate the new connection
		 */
		nci = ecm_db_connection_alloc();
		if (!nci) {
			DEBUG_WARN("Failed to allocate connection\n");
			return NF_ACCEPT;
		}

		/*
		 * Connection must have a front end instance associated with it
		 */
		feci = (struct ecm_front_end_connection_instance *)ecm_nss_ported_ipv6_connection_instance_alloc(nci, protocol, can_accel);
		if (!feci) {
			ecm_db_connection_deref(nci);
			DEBUG_WARN("Failed to allocate front end\n");
			return NF_ACCEPT;
		}

		/*
		 * Get the src and destination mappings
		 * For this we also need the interface lists which we also set upon the new connection while we are at it.
		 * GGG TODO rework terms of "src/dest" - these need to be named consistently as from/to as per database terms.
		 * GGG TODO The empty list checks should not be needed, mapping_establish_and_ref() should fail out if there is no list anyway.
		 *
		 * NOTE: each failure path below releases, in reverse order of
		 * acquisition, every reference taken so far before returning NF_ACCEPT.
		 */
		DEBUG_TRACE("%p: Create the 'from' interface heirarchy list\n", nci);
		from_list_first = ecm_interface_heirarchy_construct(feci, from_list, ip_dest_addr, ip_src_addr, 6, protocol, in_dev, is_routed, in_dev, src_node_addr, dest_node_addr, NULL);
		if (from_list_first == ECM_DB_IFACE_HEIRARCHY_MAX) {
			feci->deref(feci);
			ecm_db_connection_deref(nci);
			DEBUG_WARN("Failed to obtain 'from' heirarchy list\n");
			return NF_ACCEPT;
		}
		ecm_db_connection_from_interfaces_reset(nci, from_list, from_list_first);

		DEBUG_TRACE("%p: Create source node\n", nci);
		src_ni = ecm_nss_ipv6_node_establish_and_ref(feci, in_dev, ip_src_addr, from_list, from_list_first, src_node_addr);
		ecm_db_connection_interfaces_deref(from_list, from_list_first);
		if (!src_ni) {
			feci->deref(feci);
			ecm_db_connection_deref(nci);
			DEBUG_WARN("Failed to establish source node\n");
			return NF_ACCEPT;
		}

		DEBUG_TRACE("%p: Create source mapping\n", nci);
		src_mi = ecm_nss_ipv6_mapping_establish_and_ref(ip_src_addr, src_port);
		if (!src_mi) {
			ecm_db_node_deref(src_ni);
			feci->deref(feci);
			ecm_db_connection_deref(nci);
			DEBUG_WARN("Failed to establish src mapping\n");
			return NF_ACCEPT;
		}

		DEBUG_TRACE("%p: Create the 'to' interface heirarchy list\n", nci);
		to_list_first = ecm_interface_heirarchy_construct(feci, to_list, ip_src_addr, ip_dest_addr, 6, protocol, out_dev, is_routed, in_dev, dest_node_addr, src_node_addr, NULL);
		if (to_list_first == ECM_DB_IFACE_HEIRARCHY_MAX) {
			ecm_db_mapping_deref(src_mi);
			ecm_db_node_deref(src_ni);
			feci->deref(feci);
			ecm_db_connection_deref(nci);
			DEBUG_WARN("Failed to obtain 'to' heirarchy list\n");
			return NF_ACCEPT;
		}
		ecm_db_connection_to_interfaces_reset(nci, to_list, to_list_first);

		DEBUG_TRACE("%p: Create dest node\n", nci);
		dest_ni = ecm_nss_ipv6_node_establish_and_ref(feci, out_dev, ip_dest_addr, to_list, to_list_first, dest_node_addr);
		ecm_db_connection_interfaces_deref(to_list, to_list_first);
		if (!dest_ni) {
			ecm_db_mapping_deref(src_mi);
			ecm_db_node_deref(src_ni);
			feci->deref(feci);
			ecm_db_connection_deref(nci);
			DEBUG_WARN("Failed to establish dest node\n");
			return NF_ACCEPT;
		}

		DEBUG_TRACE("%p: Create dest mapping\n", nci);
		dest_mi = ecm_nss_ipv6_mapping_establish_and_ref(ip_dest_addr, dest_port);
		if (!dest_mi) {
			ecm_db_node_deref(dest_ni);
			ecm_db_mapping_deref(src_mi);
			ecm_db_node_deref(src_ni);
			feci->deref(feci);
			ecm_db_connection_deref(nci);
			DEBUG_WARN("Failed to establish dest mapping\n");
			return NF_ACCEPT;
		}

		/*
		 * Every connection also needs a default classifier which is considered 'special'
		 */
		dci = ecm_classifier_default_instance_alloc(nci, protocol, ecm_dir, src_port, dest_port);
		if (!dci) {
			ecm_db_mapping_deref(dest_mi);
			ecm_db_node_deref(dest_ni);
			ecm_db_mapping_deref(src_mi);
			ecm_db_node_deref(src_ni);
			feci->deref(feci);
			ecm_db_connection_deref(nci);
			DEBUG_WARN("Failed to allocate default classifier\n");
			return NF_ACCEPT;
		}
		ecm_db_connection_classifier_assign(nci, (struct ecm_classifier_instance *)dci);

		/*
		 * Every connection starts with a full complement of classifiers assigned.
		 * NOTE: Default classifier is a special case considered previously
		 */
		for (classifier_type = ECM_CLASSIFIER_TYPE_DEFAULT + 1; classifier_type < ECM_CLASSIFIER_TYPES; ++classifier_type) {
			struct ecm_classifier_instance *aci = ecm_nss_ipv6_assign_classifier(nci, classifier_type);
			if (aci) {
				/*
				 * Assignment took its own ref; drop ours immediately.
				 */
				aci->deref(aci);
			} else {
				dci->base.deref((struct ecm_classifier_instance *)dci);
				ecm_db_mapping_deref(dest_mi);
				ecm_db_node_deref(dest_ni);
				ecm_db_mapping_deref(src_mi);
				ecm_db_node_deref(src_ni);
				feci->deref(feci);
				ecm_db_connection_deref(nci);
				DEBUG_WARN("Failed to allocate classifiers assignments\n");
				return NF_ACCEPT;
			}
		}

		/*
		 * Now add the connection into the database.
		 * NOTE: In an SMP situation such as ours there is a possibility that more than one packet for the same
		 * connection is being processed simultaneously.
		 * We *could* end up creating more than one connection instance for the same actual connection.
		 * To guard against this we now perform a mutex'd lookup of the connection + add once more - another cpu may have created it before us.
		 */
		spin_lock_bh(&ecm_nss_ipv6_lock);
		ci = ecm_db_connection_find_and_ref(ip_src_addr, ip_dest_addr, protocol, src_port, dest_port);
		if (ci) {
			/*
			 * Another cpu created the same connection before us - use the one we just found
			 */
			spin_unlock_bh(&ecm_nss_ipv6_lock);
			ecm_db_connection_deref(nci);
		} else {
			ecm_db_timer_group_t tg;
			ecm_tracker_sender_state_t src_state;
			ecm_tracker_sender_state_t dest_state;
			ecm_tracker_connection_state_t state;
			struct ecm_tracker_instance *ti;

			/*
			 * Ask tracker for timer group to set the connection to initially.
			 */
			ti = dci->tracker_get_and_ref(dci);
			ti->state_get(ti, &src_state, &dest_state, &state, &tg);
			ti->deref(ti);

			/*
			 * Add the new connection we created into the database
			 * NOTE: assign to a short timer group for now - it is the assigned classifiers responsibility to do this
			 */
			ecm_db_connection_add(nci, feci, src_mi, dest_mi, src_mi, dest_mi,
					src_ni, dest_ni, src_ni, dest_ni,
					6, protocol, ecm_dir,
					NULL /* final callback */,
					ecm_nss_ported_ipv6_connection_defunct_callback,
					tg, is_routed, nci);

			spin_unlock_bh(&ecm_nss_ipv6_lock);

			ci = nci;
			DEBUG_INFO("%p: New ported connection created\n", ci);
		}

		/*
		 * No longer need references to the objects we created
		 * (the database add, or the pre-existing ci, holds what is needed from here on)
		 */
		dci->base.deref((struct ecm_classifier_instance *)dci);
		ecm_db_mapping_deref(dest_mi);
		ecm_db_node_deref(dest_ni);
		ecm_db_mapping_deref(src_mi);
		ecm_db_node_deref(src_ni);
		feci->deref(feci);
	}

	/*
	 * Keep connection alive as we have seen activity
	 */
	if (!ecm_db_connection_defunct_timer_touch(ci)) {
		ecm_db_connection_deref(ci);
		return NF_ACCEPT;
	}

	/*
	 * Identify which side of the connection is sending
	 * NOTE: This may be different than what sender is at the moment
	 * given the connection we have located.
	 */
	ecm_db_connection_from_address_get(ci, match_addr);
	if (ECM_IP_ADDR_MATCH(ip_src_addr, match_addr)) {
		sender = ECM_TRACKER_SENDER_TYPE_SRC;
	} else {
		sender = ECM_TRACKER_SENDER_TYPE_DEST;
	}

	/*
	 * Do we need to action generation change?
	 */
	if (unlikely(ecm_db_connection_regeneration_required_check(ci))) {
		ecm_nss_ipv6_connection_regenerate(ci, sender, out_dev, in_dev);
	}

	/*
	 * Iterate the assignments and call to process!
	 * Policy implemented:
	 * 1. Classifiers that say they are not relevant are unassigned and not actioned further.
	 * 2. Any drop command from any classifier is honoured.
	 * 3. All classifiers must action acceleration for accel to be honoured, any classifiers not sure of their relevance will stop acceleration.
	 * 4. Only the highest priority classifier, that actions it, will have its qos tag honoured.
	 * 5. Only the highest priority classifier, that actions it, will have its timer group honoured.
	 */
	DEBUG_TRACE("%p: process begin, skb: %p\n", ci, skb);
	prevalent_pr.process_actions = 0;
	prevalent_pr.drop = false;
	prevalent_pr.flow_qos_tag = skb->priority;
	prevalent_pr.return_qos_tag = skb->priority;
	prevalent_pr.accel_mode = ECM_CLASSIFIER_ACCELERATION_MODE_ACCEL;
	prevalent_pr.timer_group = ci_orig_timer_group = ecm_db_connection_timer_group_get(ci);

	assignment_count = ecm_db_connection_classifier_assignments_get_and_ref(ci, assignments);
	for (aci_index = 0; aci_index < assignment_count; ++aci_index) {
		struct ecm_classifier_process_response aci_pr;
		struct ecm_classifier_instance *aci;

		aci = assignments[aci_index];
		DEBUG_TRACE("%p: process: %p, type: %d\n", ci, aci, aci->type_get(aci));
		aci->process(aci, sender, iph, skb, &aci_pr);
		DEBUG_TRACE("%p: aci_pr: process actions: %x, became relevant: %u, relevance: %d, drop: %d, "
				"flow_qos_tag: %u, return_qos_tag: %u, accel_mode: %x, timer_group: %d\n",
				ci, aci_pr.process_actions, aci_pr.became_relevant, aci_pr.relevance, aci_pr.drop,
				aci_pr.flow_qos_tag, aci_pr.return_qos_tag, aci_pr.accel_mode, aci_pr.timer_group);

		if (aci_pr.relevance == ECM_CLASSIFIER_RELEVANCE_NO) {
			ecm_classifier_type_t aci_type;

			/*
			 * This classifier can be unassigned - PROVIDED it is not the default classifier
			 */
			aci_type = aci->type_get(aci);
			if (aci_type == ECM_CLASSIFIER_TYPE_DEFAULT) {
				continue;
			}

			DEBUG_INFO("%p: Classifier not relevant, unassign: %d", ci, aci_type);
			ecm_db_connection_classifier_unassign(ci, aci);
			continue;
		}

		/*
		 * Yes or Maybe relevant.
		 */
		if (aci_pr.process_actions & ECM_CLASSIFIER_PROCESS_ACTION_DROP) {
			/*
			 * Drop command from any classifier is actioned.
			 */
			DEBUG_TRACE("%p: wants drop: %p, type: %d, skb: %p\n", ci, aci, aci->type_get(aci), skb);
			prevalent_pr.drop |= aci_pr.drop;
		}

		/*
		 * Accel mode permission
		 */
		if (aci_pr.relevance == ECM_CLASSIFIER_RELEVANCE_MAYBE) {
			/*
			 * Classifier not sure of its relevance - cannot accel yet
			 */
			DEBUG_TRACE("%p: accel denied by maybe: %p, type: %d\n", ci, aci, aci->type_get(aci));
			prevalent_pr.accel_mode = ECM_CLASSIFIER_ACCELERATION_MODE_NO;
		} else {
			if (aci_pr.process_actions & ECM_CLASSIFIER_PROCESS_ACTION_ACCEL_MODE) {
				if (aci_pr.accel_mode == ECM_CLASSIFIER_ACCELERATION_MODE_NO) {
					DEBUG_TRACE("%p: accel denied: %p, type: %d\n", ci, aci, aci->type_get(aci));
					prevalent_pr.accel_mode = ECM_CLASSIFIER_ACCELERATION_MODE_NO;
				}
				/* else yes or don't care about accel */
			}
		}

		/*
		 * Timer group (the last classifier i.e. the highest priority one) will 'win'
		 */
		if (aci_pr.process_actions & ECM_CLASSIFIER_PROCESS_ACTION_TIMER_GROUP) {
			DEBUG_TRACE("%p: timer group: %p, type: %d, group: %d\n", ci, aci, aci->type_get(aci), aci_pr.timer_group);
			prevalent_pr.timer_group = aci_pr.timer_group;
		}

		/*
		 * Qos tag (the last classifier i.e. the highest priority one) will 'win'
		 */
		if (aci_pr.process_actions & ECM_CLASSIFIER_PROCESS_ACTION_QOS_TAG) {
			DEBUG_TRACE("%p: aci: %p, type: %d, flow qos tag: %u, return qos tag: %u\n",
					ci, aci, aci->type_get(aci), aci_pr.flow_qos_tag, aci_pr.return_qos_tag);
			prevalent_pr.flow_qos_tag = aci_pr.flow_qos_tag;
			prevalent_pr.return_qos_tag = aci_pr.return_qos_tag;
		}

#ifdef ECM_CLASSIFIER_DSCP_ENABLE
		/*
		 * If any classifier denied DSCP remarking then that overrides every classifier
		 */
		if (aci_pr.process_actions & ECM_CLASSIFIER_PROCESS_ACTION_DSCP_DENY) {
			DEBUG_TRACE("%p: aci: %p, type: %d, DSCP remark denied\n",
					ci, aci, aci->type_get(aci));
			prevalent_pr.process_actions |= ECM_CLASSIFIER_PROCESS_ACTION_DSCP_DENY;
			prevalent_pr.process_actions &= ~ECM_CLASSIFIER_PROCESS_ACTION_DSCP;
		}

		/*
		 * DSCP remark action, but only if it has not been denied by any classifier
		 */
		if (aci_pr.process_actions & ECM_CLASSIFIER_PROCESS_ACTION_DSCP) {
			if (!(prevalent_pr.process_actions & ECM_CLASSIFIER_PROCESS_ACTION_DSCP_DENY)) {
				DEBUG_TRACE("%p: aci: %p, type: %d, DSCP remark wanted, flow_dscp: %u, return dscp: %u\n",
						ci, aci, aci->type_get(aci), aci_pr.flow_dscp, aci_pr.return_dscp);
				prevalent_pr.process_actions |= ECM_CLASSIFIER_PROCESS_ACTION_DSCP;
				prevalent_pr.flow_dscp = aci_pr.flow_dscp;
				prevalent_pr.return_dscp = aci_pr.return_dscp;
			}
		}
#endif
	}
	ecm_db_connection_assignments_release(assignment_count, assignments);

	/*
	 * Change timer group?
	 */
	if (ci_orig_timer_group != prevalent_pr.timer_group) {
		DEBUG_TRACE("%p: change timer group from: %d to: %d\n", ci, ci_orig_timer_group, prevalent_pr.timer_group);
		ecm_db_connection_defunct_timer_reset(ci, prevalent_pr.timer_group);
	}

	/*
	 * Drop?
	 */
	if (prevalent_pr.drop) {
		/*
		 * A classifier requested drop: account the bytes/packet as dropped
		 * and consume the skb via NF_ACCEPT after releasing our connection ref.
		 */
		DEBUG_TRACE("%p: drop: %p\n", ci, skb);
		ecm_db_connection_data_totals_update_dropped(ci, (sender == ECM_TRACKER_SENDER_TYPE_SRC)? true : false, skb->len, 1);
		ecm_db_connection_deref(ci);
		return NF_ACCEPT;
	}
	ecm_db_connection_data_totals_update(ci, (sender == ECM_TRACKER_SENDER_TYPE_SRC)? true : false, skb->len, 1);

	/*
	 * Assign qos tag
	 * GGG TODO Should we use sender to identify whether to use flow or return qos tag?
	 */
	skb->priority = prevalent_pr.flow_qos_tag;
	DEBUG_TRACE("%p: skb priority: %u\n", ci, skb->priority);

	/*
	 * Accelerate?
	 * Only when every consulted classifier permitted acceleration.
	 */
	if (prevalent_pr.accel_mode == ECM_CLASSIFIER_ACCELERATION_MODE_ACCEL) {
		struct ecm_front_end_connection_instance *feci;
		DEBUG_TRACE("%p: accel\n", ci);
		feci = ecm_db_connection_front_end_get_and_ref(ci);
		ecm_nss_ported_ipv6_connection_accelerate(feci, &prevalent_pr, ct, is_l2_encap);
		feci->deref(feci);
	}
	ecm_db_connection_deref(ci);

	return NF_ACCEPT;
}

/*
 * ecm_nss_ported_ipv6_debugfs_init()
 *	Create the debugfs statistics files for the ported IPv6 front end.
 *
 * Exposes read-only u32 counters "udp_accelerated_count" and
 * "tcp_accelerated_count" under the given parent dentry.
 *
 * @param dentry	parent debugfs directory to create the files in
 * @return true on success; false if either file could not be created
 *	(any file created before the failure is removed again).
 */
bool ecm_nss_ported_ipv6_debugfs_init(struct dentry *dentry)
{
	struct dentry *udp_dentry;

	udp_dentry = debugfs_create_u32("udp_accelerated_count", S_IRUGO, dentry,
					&ecm_nss_ported_ipv6_accelerated_count[ECM_NSS_PORTED_IPV6_PROTO_UDP]);
	if (!udp_dentry) {
		DEBUG_ERROR("Failed to create ecm nss ipv6 udp_accelerated_count file in debugfs\n");
		return false;
	}

	if (!debugfs_create_u32("tcp_accelerated_count", S_IRUGO, dentry,
					&ecm_nss_ported_ipv6_accelerated_count[ECM_NSS_PORTED_IPV6_PROTO_TCP])) {
		DEBUG_ERROR("Failed to create ecm nss ipv6 tcp_accelerated_count file in debugfs\n");
		/* Unwind the already-created udp file so failure leaves debugfs clean */
		debugfs_remove(udp_dentry);
		return false;
	}

	return true;
}