Project import generated by Copybara.
GitOrigin-RevId: c30409efaf0fcac98d9d2d62f0b5623b3a4b6897
diff --git a/build_scripts/build_all.sh b/build_scripts/build_all.sh
index 111d8c1..95d06f7 100755
--- a/build_scripts/build_all.sh
+++ b/build_scripts/build_all.sh
@@ -127,6 +127,10 @@
./build.sh ${product} ${eureka_src_path}
popd
+ pushd ${top_dir}/sdk/nat46
+ ./build.sh ${product} ${eureka_src_path}
+ popd
+
pushd ${top_dir}/sdk/qca-nss-sfe
./build.sh ${product} ${eureka_src_path}
popd
diff --git a/build_scripts/release_oss.sh b/build_scripts/release_oss.sh
index 732d06e..ffefc33 100755
--- a/build_scripts/release_oss.sh
+++ b/build_scripts/release_oss.sh
@@ -18,9 +18,9 @@
rsync -av ${src}/ ${dst} --exclude .git
}
-# Release source code under ./bootloader
+# Release source code under ./u-boot
function release_bootloader() {
- src=$1/bootloader
+ src=$1/u-boot
dst=$2/u-boot
echo "Copying bootloader from $src ==> $dst..."
diff --git a/build_scripts/setup_env.sh b/build_scripts/setup_env.sh
index a658366..b223268 100644
--- a/build_scripts/setup_env.sh
+++ b/build_scripts/setup_env.sh
@@ -25,7 +25,7 @@
if [ ! -d "${TOP_DIR}" ]; then
TOP_DIR="$(readlink -e $(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/../..)"
fi
-ENABLE_64BIT_BUILD=${ENABLE_64BIT_BUILD:-"false"}
+ENABLE_64BIT_BUILD=${ENABLE_64BIT_BUILD:-"true"}
_toolchain_dir=$(readlink -e ${TOP_DIR}/prebuilt/toolchain)
_num_jobs=$(grep -c processor /proc/cpuinfo)
diff --git a/qca-nss-dp/Makefile b/qca-nss-dp/Makefile
index 157342b..3b00d77 100644
--- a/qca-nss-dp/Makefile
+++ b/qca-nss-dp/Makefile
@@ -44,7 +44,7 @@
hal/dp_ops/syn_gmac_dp/syn_dp.o \
hal/gmac_ops/syn/gmac/syn_if.o
NSS_DP_INCLUDE += -I$(obj)/hal/dp_ops/syn_gmac_dp/include
-ccflags-y += -DNSS_DP_IPQ50XX
+ccflags-y += -DNSS_DP_IPQ50XX -DNSS_DP_ENABLE_NAPI_GRO
endif
ifeq ($(SoC),$(filter $(SoC),ipq95xx))
@@ -60,7 +60,7 @@
hal/gmac_ops/syn/xgmac/syn_if.o
NSS_DP_INCLUDE += -I$(obj)/hal/dp_ops/edma_dp/edma_v2
NSS_DP_INCLUDE += -I$(obj)/hal/dp_ops/edma_dp/edma_v2/include
-ccflags-y += -DNSS_DP_IPQ95XX -DNSS_DP_PPE_SUPPORT
+ccflags-y += -DNSS_DP_IPQ95XX -DNSS_DP_PPE_SUPPORT -DNSS_DP_ENABLE_NAPI_GRO
ifneq ($(CONFIG_NET_SWITCHDEV),)
qca-nss-dp-objs += nss_dp_switchdev.o
ccflags-y += -DNSS_DP_PPE_SWITCHDEV
diff --git a/qca-nss-dp/hal/dp_ops/syn_gmac_dp/syn_dp.c b/qca-nss-dp/hal/dp_ops/syn_gmac_dp/syn_dp.c
index 4996653..ba6aa57 100644
--- a/qca-nss-dp/hal/dp_ops/syn_gmac_dp/syn_dp.c
+++ b/qca-nss-dp/hal/dp_ops/syn_gmac_dp/syn_dp.c
@@ -335,8 +335,6 @@
return NETDEV_TX_OK;
}
- dp_global_ctx.tx_requeue_stop = 1;
-
/*
* Handle the scenario when descriptors are not enough.
* Only one DMA channel is supported to assume queue 0.
diff --git a/qca-nss-dp/hal/dp_ops/syn_gmac_dp/syn_dp_rx.c b/qca-nss-dp/hal/dp_ops/syn_gmac_dp/syn_dp_rx.c
index 529c77e..a695cf1 100644
--- a/qca-nss-dp/hal/dp_ops/syn_gmac_dp/syn_dp_rx.c
+++ b/qca-nss-dp/hal/dp_ops/syn_gmac_dp/syn_dp_rx.c
@@ -381,7 +381,6 @@
struct dma_desc_rx *rx_desc_next = NULL;
uint8_t *next_skb_ptr;
skb_frag_t *frag = NULL;
- bool is_gro_enabled = netdev->features & NETIF_F_GRO;
busy = atomic_read((atomic_t *)&rx_info->busy_rx_desc_cnt);
if (unlikely(!busy)) {
@@ -496,12 +495,11 @@
/*
* Deliver the packet to linux
*/
- if (is_gro_enabled) {
- napi_gro_receive(&rx_info->napi_rx, rx_skb);
- } else {
- netif_receive_skb(rx_skb);
- }
-
+#if defined(NSS_DP_ENABLE_NAPI_GRO)
+ napi_gro_receive(&rx_info->napi_rx, rx_skb);
+#else
+ netif_receive_skb(rx_skb);
+#endif
goto next_desc;
}
@@ -544,13 +542,11 @@
prefetch(next_skb_ptr + SYN_DP_RX_SKB_CACHE_LINE1);
prefetch(next_skb_ptr + SYN_DP_RX_SKB_CACHE_LINE3);
}
-
- if (is_gro_enabled) {
- napi_gro_receive(&rx_info->napi_rx, rx_skb);
- } else {
- netif_receive_skb(rx_skb);
- }
-
+#if defined(NSS_DP_ENABLE_NAPI_GRO)
+ napi_gro_receive(&rx_info->napi_rx, rx_skb);
+#else
+ netif_receive_skb(rx_skb);
+#endif
goto next_desc;
}
@@ -605,12 +601,11 @@
prefetch(next_skb_ptr + SYN_DP_RX_SKB_CACHE_LINE3);
}
- if (is_gro_enabled) {
- napi_gro_receive(&rx_info->napi_rx, rx_info->head);
- } else {
- netif_receive_skb(rx_info->head);
- }
-
+#if defined(NSS_DP_ENABLE_NAPI_GRO)
+ napi_gro_receive(&rx_info->napi_rx, rx_info->head);
+#else
+ netif_receive_skb(rx_info->head);
+#endif
rx_info->head = NULL;
goto next_desc;
}
diff --git a/qca-nss-dp/hal/dp_ops/syn_gmac_dp/syn_dp_rx.h b/qca-nss-dp/hal/dp_ops/syn_gmac_dp/syn_dp_rx.h
index 8f0105e..bb6d005 100644
--- a/qca-nss-dp/hal/dp_ops/syn_gmac_dp/syn_dp_rx.h
+++ b/qca-nss-dp/hal/dp_ops/syn_gmac_dp/syn_dp_rx.h
@@ -19,8 +19,8 @@
#ifndef __NSS_DP_SYN_DP_RX__
#define __NSS_DP_SYN_DP_RX__
-#define SYN_DP_NAPI_BUDGET_RX 64
-#define SYN_DP_RX_DESC_SIZE 2048 /* Rx Descriptors needed in the descriptor pool/queue */
+#define SYN_DP_NAPI_BUDGET_RX 32
+#define SYN_DP_RX_DESC_SIZE 128 /* Rx Descriptors needed in the descriptor pool/queue */
#define SYN_DP_RX_DESC_MAX_INDEX (SYN_DP_RX_DESC_SIZE - 1)
/*
diff --git a/qca-nss-dp/hal/dp_ops/syn_gmac_dp/syn_dp_tx.h b/qca-nss-dp/hal/dp_ops/syn_gmac_dp/syn_dp_tx.h
index 6a5b88a..458fcd4 100644
--- a/qca-nss-dp/hal/dp_ops/syn_gmac_dp/syn_dp_tx.h
+++ b/qca-nss-dp/hal/dp_ops/syn_gmac_dp/syn_dp_tx.h
@@ -18,8 +18,8 @@
#ifndef __NSS_DP_SYN_DP_TX__
#define __NSS_DP_SYN_DP_TX__
-#define SYN_DP_NAPI_BUDGET_TX 64
-#define SYN_DP_TX_DESC_SIZE 8192 /* Tx Descriptors needed in the descriptor pool/queue */
+#define SYN_DP_NAPI_BUDGET_TX 32
+#define SYN_DP_TX_DESC_SIZE 1024 /* Tx Descriptors needed in the descriptor pool/queue */
#define SYN_DP_TX_DESC_MAX_INDEX (SYN_DP_TX_DESC_SIZE - 1)
#define SYN_DP_TX_INVALID_DESC_INDEX SYN_DP_TX_DESC_SIZE
diff --git a/qca-nss-ecm/Makefile b/qca-nss-ecm/Makefile
index 8d3d02f..b34e832 100644
--- a/qca-nss-ecm/Makefile
+++ b/qca-nss-ecm/Makefile
@@ -19,9 +19,6 @@
# Makefile for the QCA NSS ECM
# ###################################################
-ifeq ($(ECM_FRONT_END_SFE_ENABLE), y)
-obj-m += examples/ecm_sfe_l2.o
-endif
obj-m +=examples/ecm_ae_select.o
obj-m += ecm.o
diff --git a/qca-nss-ecm/build.sh b/qca-nss-ecm/build.sh
index 452d82c..0c67b1b 100755
--- a/qca-nss-ecm/build.sh
+++ b/qca-nss-ecm/build.sh
@@ -20,8 +20,9 @@
kernel_path=$(readlink -e ${sdk_top_dir}/../kernel)
qca_sfe_path=$(readlink -e ${sdk_top_dir}/qca-nss-sfe/)
+nat46_path=$(readlink -e ${sdk_top_dir}/nat46/nat46/modules)
soc_type=ipq50xx
-extra_cflags="-I${qca_sfe_path}/exports"
+extra_cflags="-I${qca_sfe_path}/exports -I${nat46_path}"
build_flags="ECM_CLASSIFIER_HYFI_ENABLE=n ECM_MULTICAST_ENABLE=n ECM_INTERFACE_IPSEC_ENABLE=n ECM_INTERFACE_PPTP_ENABLE=n ECM_INTERFACE_L2TPV2_ENABLE=n ECM_INTERFACE_GRE_TAP_ENABLE=n ECM_INTERFACE_GRE_TUN_ENABLE=n ECM_INTERFACE_SIT_ENABLE=n ECM_INTERFACE_TUNIPIP6_ENABLE=n ECM_INTERFACE_RAWIP_ENABLE=n ECM_INTERFACE_BOND_ENABLE=n ECM_XFRM_ENABLE=n ECM_FRONT_END_SFE_ENABLE=y ECM_NON_PORTED_SUPPORT_ENABLE=n ECM_INTERFACE_MAP_T_ENABLE=n ECM_INTERFACE_VXLAN_ENABLE=n ECM_INTERFACE_OVS_BRIDGE_ENABLE=n ECM_CLASSIFIER_OVS_ENABLE=n ECM_CLASSIFIER_DSCP_IGS=n ECM_IPV6_ENABLE=y ECM_FRONT_END_NSS_ENABLE=n EXAMPLES_BUILD_OVS=n"
@@ -32,7 +33,7 @@
# make kernel module
echo "Build ${MODULE_NAME}"
${CROSS_MAKE} -C ${kernel_path} M=${sdk_top_dir}/${MODULE_NAME} ${build_flags} SoC=${soc_type} EXTRA_CFLAGS="${extra_cflags}" \
- KBUILD_EXTRA_SYMBOLS="${qca_sfe_path}/Module.symvers" V=1
+ KBUILD_EXTRA_SYMBOLS="${qca_sfe_path}/Module.symvers ${nat46_path}/Module.symvers" V=1
}
##################################################
@@ -62,7 +63,6 @@
local module_target_dir="$(GetModulePath ${eureka_src_path} ${product})"
mkdir -p ${module_target_dir}
cp -f ecm.ko ${module_target_dir}/${MODULE_NAME}.ko
- cp -f examples/ecm_sfe_l2.ko ${module_target_dir}
}
function Usage() {
diff --git a/qca-nss-ecm/ecm_db/ecm_db_connection.c b/qca-nss-ecm/ecm_db/ecm_db_connection.c
index fd68f23..869e35d 100644
--- a/qca-nss-ecm/ecm_db/ecm_db_connection.c
+++ b/qca-nss-ecm/ecm_db/ecm_db_connection.c
@@ -279,19 +279,20 @@
DEBUG_INFO("%px: defunct timer expired\n", ci);
+ /*
+ * If defunct fails, return. Do not remove the last ref count. This failure means
+ * it will be re-tried later with the ecm_db_connection_make_defunct function
+ * until the total failure count reaches the max limit, which is 250.
+ * When the limit is reached, the defunct process will return true and let
+ * the connection go away.
+ */
ret = ci->defunct(ci->feci, &accel_mode);
/*
- * If the returned 'ret' is success, this means this callback succeeded to
- * defunct the connection and it can release the last reference.
- * If it fails, this means that another defunct process defuncted the connection
- * before this callback. In that case, we will check the accel_mode of the connection.
- * If the other call defuncted the connection successfully, it will set the accel_mode to
- * ECM_FRONT_END_ACCELERATION_MODE_FAIL_DEFUNCT_SHORT for s short amount of time to avoid
- * further accel/decel attempts. So, in this accel_mode, this callback shouldn't release the
- * last reference. It will be released by the ecm_db_connection_make_defunct() function.
+ * Release the last reference of this connection. This reference is the one
+ * which was held when the connection was allocated.
*/
- if (ret || (ECM_FRONT_END_ACCELERATION_FAILED(accel_mode) && (accel_mode != ECM_FRONT_END_ACCELERATION_MODE_FAIL_DEFUNCT_SHORT))) {
+ if (ret || ECM_FRONT_END_ACCELERATION_FAILED(accel_mode)) {
ecm_db_connection_deref(ci);
}
}
diff --git a/qca-nss-ecm/ecm_db/ecm_db_node.c b/qca-nss-ecm/ecm_db/ecm_db_node.c
index 18d6aba..c3d70be 100644
--- a/qca-nss-ecm/ecm_db/ecm_db_node.c
+++ b/qca-nss-ecm/ecm_db/ecm_db_node.c
@@ -1,12 +1,9 @@
/*
**************************************************************************
* Copyright (c) 2014-2018, 2020-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
- *
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all copies.
- *
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
@@ -715,17 +712,6 @@
EXPORT_SYMBOL(ecm_db_node_hash_index_get_first);
/*
- * ecm_db_node_get_connections_count()
- * Returns the connections count on the node in the given direction.
- */
-int ecm_db_node_get_connections_count(struct ecm_db_node_instance *ni, ecm_db_obj_dir_t dir)
-{
- DEBUG_CHECK_MAGIC(ni, ECM_DB_NODE_INSTANCE_MAGIC, "%px: magic failed\n", ni);
-
- return ni->connections_count[dir];
-}
-
-/*
* ecm_db_node_alloc()
* Allocate a node instance
*/
diff --git a/qca-nss-ecm/ecm_db/ecm_db_node.h b/qca-nss-ecm/ecm_db/ecm_db_node.h
index 1661ccd..a2c0e03 100644
--- a/qca-nss-ecm/ecm_db/ecm_db_node.h
+++ b/qca-nss-ecm/ecm_db/ecm_db_node.h
@@ -1,12 +1,9 @@
/*
**************************************************************************
* Copyright (c) 2014-2020, The Linux Foundation. All rights reserved.
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
- *
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all copies.
- *
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
@@ -128,8 +125,6 @@
int ecm_db_node_hash_index_get_first(void);
#endif
-int ecm_db_node_get_connections_count(struct ecm_db_node_instance *ni, ecm_db_obj_dir_t dir);
-
void ecm_db_node_ovs_connections_masked_defunct(int ip_ver, uint8_t *src_mac, bool src_mac_check, ip_addr_t src_addr_mask,
uint16_t src_port_mask, uint8_t *dest_mac, bool dest_mac_check,
ip_addr_t dest_addr_mask, uint16_t dest_port_mask,
diff --git a/qca-nss-ecm/ecm_interface.c b/qca-nss-ecm/ecm_interface.c
index 52bd11b..b46233b 100644
--- a/qca-nss-ecm/ecm_interface.c
+++ b/qca-nss-ecm/ecm_interface.c
@@ -6424,12 +6424,15 @@
*/
if ((is_ported || ecm_db_connection_is_pppoe_bridged_get(ci)) &&
is_valid_ether_addr(mac_addr) && ecm_front_end_is_bridge_port(dev) && rx_packets) {
-
DEBUG_TRACE("Update bridge fdb entry for mac: %pM\n", mac_addr);
+
/*
- * Update the existing fdb entry's timestamp only.
+ * Update fdb entry only if it exists. Please note that br_refresh_fdb_entry() API
+ * creates new fdb entry if it does not exist.
*/
- br_fdb_entry_refresh(dev, mac_addr, 0);
+ if (br_fdb_has_entry(dev, mac_addr, 0)) {
+ br_refresh_fdb_entry(dev, mac_addr);
+ }
}
}
@@ -7017,12 +7020,7 @@
* FROM_NAT and TO_NAT have the same list of connections.
*/
for (dir = 0; dir <= ECM_DB_OBJ_DIR_TO; dir++) {
- /*
- * If there is connection on this node, call the defunct function.
- */
- if (ecm_db_node_get_connections_count(ni, dir)) {
- ecm_db_traverse_node_connection_list_and_defunct(ni, dir, ip_version);
- }
+ ecm_db_traverse_node_connection_list_and_defunct(ni, dir, ip_version);
}
}
diff --git a/qca-nss-ecm/ecm_types.h b/qca-nss-ecm/ecm_types.h
index 3777688..6ec2fec 100644
--- a/qca-nss-ecm/ecm_types.h
+++ b/qca-nss-ecm/ecm_types.h
@@ -192,13 +192,13 @@
#define ECM_LINUX6_TO_IP_ADDR(d,s) \
{ \
ecm_type_check_ecm_ip_addr(d); \
- ecm_type_check_ae_ipv6(s); \
+ ecm_type_check_ae_ipv6(&s); \
__ECM_IP_ADDR_COPY_NO_CHECK(d,s); \
}
#define ECM_IP_ADDR_TO_LINUX6(d,s) \
{ \
- ecm_type_check_ae_ipv6(d); \
+ ecm_type_check_ae_ipv6(&d); \
ecm_type_check_ecm_ip_addr(s); \
__ECM_IP_ADDR_COPY_NO_CHECK(d,s); \
}
diff --git a/qca-nss-ecm/examples/ecm_sfe_l2.c b/qca-nss-ecm/examples/ecm_sfe_l2.c
deleted file mode 100644
index dd64653..0000000
--- a/qca-nss-ecm/examples/ecm_sfe_l2.c
+++ /dev/null
@@ -1,1085 +0,0 @@
-/*
- **************************************************************************
- * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
-
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- **************************************************************************
- */
-#include <linux/module.h>
-
-#include <linux/version.h>
-#include <linux/types.h>
-#include <linux/debugfs.h>
-#include <linux/string.h>
-#include <linux/ctype.h>
-#include <linux/etherdevice.h>
-#include <linux/inet.h>
-
-#include "exports/ecm_sfe_common_public.h"
-
-/*
- * Global WAN interface name parameter.
- */
-char wan_name[IFNAMSIZ];
-int wan_name_len;
-
-/*
- * DebugFS entry object.
- */
-static struct dentry *ecm_sfe_l2_dentry;
-
-/*
- * Policy rule directions.
- */
-enum ecm_sfe_l2_policy_rule_dir {
- ECM_SFE_L2_POLICY_RULE_EGRESS = 1,
- ECM_SFE_L2_POLICY_RULE_INGRESS,
- ECM_SFE_L2_POLICY_RULE_EGRESS_INGRESS,
-};
-
-/*
- * Policy rule commands.
- */
-enum ecm_sfe_l2_policy_rule_cmd {
- ECM_SFE_L2_POLICY_RULE_ADD = 1,
- ECM_SFE_L2_POLICY_RULE_DEL,
- ECM_SFE_L2_POLICY_RULE_FLUSH_ALL
-};
-
-/*
- * ECM tuple directions.
- */
-enum ecm_sfe_l2_tuple_dir {
- ECM_SFE_L2_TUPLE_DIR_ORIGINAL,
- ECM_SFE_L2_TUPLE_DIR_REPLY,
-};
-
-/*
- * Defunct by 5-tuple command option types.
- */
-enum ecm_sfe_l2_defunct_by_5tuple_options {
- ECM_SFE_L2_DEFUNCT_BY_5TUPLE_OPTION_IP_VERSION,
- ECM_SFE_L2_DEFUNCT_BY_5TUPLE_OPTION_SIP,
- ECM_SFE_L2_DEFUNCT_BY_5TUPLE_OPTION_SPORT,
- ECM_SFE_L2_DEFUNCT_BY_5TUPLE_OPTION_DIP,
- ECM_SFE_L2_DEFUNCT_BY_5TUPLE_OPTION_DPORT,
- ECM_SFE_L2_DEFUNCT_BY_5TUPLE_OPTION_PROTOCOL,
- ECM_SFE_L2_DEFUNCT_BY_5TUPLE_OPTION_MAX,
-};
-
-/*
- * Policy rule structure
- */
-struct ecm_sfe_l2_policy_rule {
- struct list_head list;
- int protocol;
- int src_port;
- int dest_port;
- uint32_t src_addr[4];
- uint32_t dest_addr[4];
- int ip_ver;
- enum ecm_sfe_l2_policy_rule_dir direction;
-};
-
-LIST_HEAD(ecm_sfe_l2_policy_rules);
-DEFINE_SPINLOCK(ecm_sfe_l2_policy_rules_lock);
-
-/*
- * ecm_sfe_l2_policy_rule_find()
- * Finds a policy rule with the given parameters.
- */
-static struct ecm_sfe_l2_policy_rule *ecm_sfe_l2_policy_rule_find(int ip_ver, uint32_t *sip_addr, int sport,
- uint32_t *dip_addr, int dport,
- int protocol)
-{
- struct ecm_sfe_l2_policy_rule *rule = NULL;
-
- list_for_each_entry(rule , &ecm_sfe_l2_policy_rules, list) {
- if (rule->ip_ver != ip_ver)
- continue;
-
- if (rule->protocol && (rule->protocol != protocol))
- continue;
-
- if (rule->dest_port && (rule->dest_port != dport))
- continue;
-
- if (rule->src_port && (rule->src_port != sport))
- continue;
-
- if (ip_ver == 4) {
- if (rule->dest_addr[0] && (rule->dest_addr[0] != dip_addr[0]))
- continue;
- } else {
- if (rule->dest_addr[0] && memcmp(rule->dest_addr, dip_addr, sizeof(uint32_t) * 4))
- continue;
- }
-
- if (ip_ver == 4) {
- if (rule->src_addr[0] && (rule->src_addr[0] != sip_addr[0]))
- continue;
- } else {
- if (rule->src_addr[0] && memcmp(rule->src_addr, sip_addr, sizeof(uint32_t) * 4))
- continue;
- }
-
- return rule;
- }
- return NULL;
-}
-
-/*
- * ecm_sfe_l2_connection_check_with_policy_rules()
- * Checks the ECM tuple with the policy rules in our rules list and
- * set the L2 acceleration accordingly, if there is a match.
- */
-static uint32_t ecm_sfe_l2_connection_check_with_policy_rules(struct ecm_sfe_common_tuple *tuple, enum ecm_sfe_l2_tuple_dir tuple_dir)
-{
- struct ecm_sfe_l2_policy_rule *rule = NULL;
- enum ecm_sfe_l2_policy_rule_dir direction;
- uint32_t l2_accel_bits = (ECM_SFE_COMMON_FLOW_L2_ACCEL_ALLOWED | ECM_SFE_COMMON_RETURN_L2_ACCEL_ALLOWED);
-
- if (tuple_dir == ECM_SFE_L2_TUPLE_DIR_ORIGINAL) {
- spin_lock_bh(&ecm_sfe_l2_policy_rules_lock);
- rule = ecm_sfe_l2_policy_rule_find(tuple->ip_ver, tuple->src_addr, tuple->src_port,
- tuple->dest_addr, tuple->dest_port, tuple->protocol);
- if (!rule) {
- spin_unlock_bh(&ecm_sfe_l2_policy_rules_lock);
- pr_warn("No rule with this tuple\n");
- goto done;
- }
- direction = rule->direction;
- spin_unlock_bh(&ecm_sfe_l2_policy_rules_lock);
-
- if (direction == ECM_SFE_L2_POLICY_RULE_EGRESS) {
- pr_debug("flow side should be L3 interface\n");
- l2_accel_bits &= ~ECM_SFE_COMMON_FLOW_L2_ACCEL_ALLOWED;
- } else if (direction == ECM_SFE_L2_POLICY_RULE_INGRESS) {
- pr_debug("return side should be L3 interface\n");
- l2_accel_bits &= ~ECM_SFE_COMMON_RETURN_L2_ACCEL_ALLOWED;
- }
- } else if (tuple_dir == ECM_SFE_L2_TUPLE_DIR_REPLY) {
- spin_lock_bh(&ecm_sfe_l2_policy_rules_lock);
- rule = ecm_sfe_l2_policy_rule_find(tuple->ip_ver, tuple->dest_addr, tuple->dest_port,
- tuple->src_addr, tuple->src_port, tuple->protocol);
-
- if (!rule) {
- spin_unlock_bh(&ecm_sfe_l2_policy_rules_lock);
- pr_warn("No rule with this tuple\n");
- goto done;
- }
- direction = rule->direction;
- spin_unlock_bh(&ecm_sfe_l2_policy_rules_lock);
-
- if (direction == ECM_SFE_L2_POLICY_RULE_EGRESS) {
- pr_debug("return side should be L3 interface\n");
- l2_accel_bits &= ~ECM_SFE_COMMON_RETURN_L2_ACCEL_ALLOWED;
- } else if (direction == ECM_SFE_L2_POLICY_RULE_INGRESS) {
- pr_debug("flow side should be L3 interface\n");
- l2_accel_bits &= ~ECM_SFE_COMMON_FLOW_L2_ACCEL_ALLOWED;
- }
- } else {
- pr_err("unknow tuple_dir: %d\n", tuple_dir);
- goto done;
- }
-
- if (direction == ECM_SFE_L2_POLICY_RULE_EGRESS_INGRESS) {
- pr_debug("both sides should be L3 interface\n");
- l2_accel_bits &= ~ECM_SFE_COMMON_FLOW_L2_ACCEL_ALLOWED;
- l2_accel_bits &= ~ECM_SFE_COMMON_RETURN_L2_ACCEL_ALLOWED;
- }
-done:
- return l2_accel_bits;
-}
-
-/*
- * ecm_sfe_l2_accel_check_callback()
- * L2 acceleration check function callback.
- */
-uint32_t ecm_sfe_l2_accel_check_callback(struct ecm_sfe_common_tuple *tuple)
-{
- struct net_device *flow_dev;
- struct net_device *return_dev;
- struct net_device *wan_dev;
- uint32_t l2_accel_bits = (ECM_SFE_COMMON_FLOW_L2_ACCEL_ALLOWED | ECM_SFE_COMMON_RETURN_L2_ACCEL_ALLOWED);
-
- if (strlen(wan_name) == 0) {
- pr_debug("WAN interface is not set in the debugfs\n");
- goto done;
- }
-
- wan_dev = dev_get_by_name(&init_net, wan_name);
- if (!wan_dev) {
- pr_debug("WAN interface: %s couldn't be found\n", wan_name);
- goto done;
- }
-
- flow_dev = dev_get_by_index(&init_net, tuple->src_ifindex);
- if (!flow_dev) {
- pr_debug("flow netdevice couldn't be found with index: %d\n", tuple->src_ifindex);
- dev_put(wan_dev);
- goto done;
- }
-
- return_dev = dev_get_by_index(&init_net, tuple->dest_ifindex);
- if (!return_dev) {
- pr_debug("return netdevice couldn't be found with index: %d\n", tuple->dest_ifindex);
- dev_put(wan_dev);
- dev_put(flow_dev);
- goto done;
- }
-
- if (wan_dev == return_dev) {
- /*
- * Check the tuple with the policy rules in the ORIGINAL direction of the tuple.
- */
- l2_accel_bits = ecm_sfe_l2_connection_check_with_policy_rules(tuple, ECM_SFE_L2_TUPLE_DIR_ORIGINAL);
- } else if (wan_dev == flow_dev) {
- /*
- * Check the tuple with the policy rules in the REPLY direction of the tuple.
- */
- l2_accel_bits = ecm_sfe_l2_connection_check_with_policy_rules(tuple, ECM_SFE_L2_TUPLE_DIR_REPLY);
- }
- dev_put(wan_dev);
- dev_put(flow_dev);
- dev_put(return_dev);
-
-done:
- return l2_accel_bits;
-}
-
-/*
- * ecm_sfe_l2_flush_policy_rules()
- * Flushes all the policy rules.
- */
-static void ecm_sfe_l2_flush_policy_rules(void)
-{
- struct ecm_sfe_l2_policy_rule *rule;
- struct ecm_sfe_l2_policy_rule *tmp;
-
- spin_lock_bh(&ecm_sfe_l2_policy_rules_lock);
- list_for_each_entry_safe(rule , tmp, &ecm_sfe_l2_policy_rules, list) {
- list_del(&rule->list);
- spin_unlock_bh(&ecm_sfe_l2_policy_rules_lock);
- kfree(rule);
- spin_lock_bh(&ecm_sfe_l2_policy_rules_lock);
- }
- spin_unlock_bh(&ecm_sfe_l2_policy_rules_lock);
-}
-
-/*
- * ecm_sfe_l2_delete_policy_rule()
- * Deletes a policy rule with the given parameters.
- */
-static bool ecm_sfe_l2_delete_policy_rule(int ip_ver, uint32_t *sip_addr, int sport, uint32_t *dip_addr, int dport, int protocol)
-{
- struct ecm_sfe_l2_policy_rule *rule;
-
- spin_lock_bh(&ecm_sfe_l2_policy_rules_lock);
- rule = ecm_sfe_l2_policy_rule_find(ip_ver, sip_addr, sport, dip_addr, dport, protocol);
- if (!rule) {
- spin_unlock_bh(&ecm_sfe_l2_policy_rules_lock);
- pr_warn("rule cannot be found in the list\n");
- return false;
- }
- list_del(&rule->list);
- spin_unlock_bh(&ecm_sfe_l2_policy_rules_lock);
- kfree(rule);
-
- pr_info("rule deleted\n");
- return true;
-}
-
-/*
- * ecm_sfe_l2_add_policy_rule()
- * Adds a policy rule with the given parameters.
- */
-static bool ecm_sfe_l2_add_policy_rule(int ip_ver, uint32_t *sip_addr, int sport, uint32_t *dip_addr, int dport, int protocol, enum ecm_sfe_l2_policy_rule_dir direction)
-{
- struct ecm_sfe_l2_policy_rule *rule;
-
- spin_lock_bh(&ecm_sfe_l2_policy_rules_lock);
- rule = ecm_sfe_l2_policy_rule_find(ip_ver, sip_addr, sport, dip_addr, dport, protocol);
- if (rule) {
- if (rule->direction != direction) {
- pr_info("Update direction of the rule from %d to %d\n", rule->direction, direction);
- rule->direction = direction;
- }
- spin_unlock_bh(&ecm_sfe_l2_policy_rules_lock);
- pr_warn("rule is already present\n");
- return true;
- }
- spin_unlock_bh(&ecm_sfe_l2_policy_rules_lock);
-
- rule = kzalloc(sizeof(struct ecm_sfe_l2_policy_rule), GFP_ATOMIC);
- if (!rule) {
- pr_warn("alloc failed for new rule\n");
- return false;
- }
-
- rule->ip_ver = ip_ver;
- rule->protocol = protocol;
- rule->src_port = sport;
- rule->dest_port = dport;
- memcpy(rule->src_addr, sip_addr, sizeof(uint32_t) * 4);
- memcpy(rule->dest_addr, dip_addr, sizeof(uint32_t) * 4);
- rule->direction = direction;
-
- INIT_LIST_HEAD(&rule->list);
-
- spin_lock_bh(&ecm_sfe_l2_policy_rules_lock);
- list_add(&rule->list, &ecm_sfe_l2_policy_rules);
- spin_unlock_bh(&ecm_sfe_l2_policy_rules_lock);
-
- pr_info("rule added\n");
- return true;
-}
-
-/*
- * ecm_sfe_l2_policy_rule_write()
- * Adds a policy rule to the rule table.
- *
- * Policy rule must include cmd, ip_ver and direction. It can also include src/dest IP and ports, protocol.
- * cmd and ip_ver MUST be the first 2 options in the command.
- */
-static ssize_t ecm_sfe_l2_policy_rule_write(struct file *file,
- const char __user *user_buf, size_t count, loff_t *offset)
-{
- char *cmd_buf;
- char *fields;
- char *token;
- char *option, *value;
- int cmd = 0; /* must be present in the rule */
- int ip_ver = 0; /* must be present in the rule */
- uint32_t sip_addr[4] = {0};
- uint32_t dip_addr[4] = {0};
- int sport = 0;
- int dport = 0;
- int protocol = 0;
- int direction = 0; /* must be present in the rule */
-
- /*
- * Command is formed as:
- * echo "cmd=1 ip_ver=4 dport=443 protocol=6 direction=1" > /sys/kernel/debug/ecm_sfe_l2/policy_rules
- *
- * cmd: 1 is to add, 2 is to delete a rule.
- * direction: 1 is egress, 2 is ingress, 3 is both
- */
- cmd_buf = kzalloc(count + 1, GFP_ATOMIC);
- if (!cmd_buf) {
- pr_warn("unable to allocate memory for cmd buffer\n");
- return -ENOMEM;
- }
-
- count = simple_write_to_buffer(cmd_buf, count, offset, user_buf, count);
-
- /*
- * Split the buffer into tokens
- */
- fields = cmd_buf;
- while ((token = strsep(&fields, " "))) {
- pr_info("\ntoken: %s\n", token);
-
- option = strsep(&token, "=");
- value = token;
-
- pr_info("\t\toption: %s\n", option);
- pr_info("\t\tvalue: %s\n", value);
-
- if (!strcmp(option, "cmd")) {
- if (sscanf(value, "%d", &cmd)) {
- if (cmd != ECM_SFE_L2_POLICY_RULE_ADD && cmd != ECM_SFE_L2_POLICY_RULE_DEL &&
- cmd != ECM_SFE_L2_POLICY_RULE_FLUSH_ALL) {
- pr_err("invalid cmd value: %d\n", cmd);
- goto fail;
- }
- continue;
- }
- pr_warn("cannot read value\n");
- goto fail;
- }
-
- if (!strcmp(option, "ip_ver")) {
- if (sscanf(value, "%d", &ip_ver)) {
- if (ip_ver != 4 && ip_ver != 6) {
- pr_err("invalid ip_ver: %d\n", ip_ver);
- goto fail;
- }
- continue;
- }
- pr_warn("cannot read value\n");
- goto fail;
- }
-
- if (!strcmp(option, "protocol")) {
- if (sscanf(value, "%d", &protocol)) {
- continue;
- }
- pr_warn("cannot read value\n");
- goto fail;
- }
-
- if (!strcmp(option, "sport")) {
- if (sscanf(value, "%d", &sport)) {
- continue;
- }
- pr_warn("cannot read value\n");
- goto fail;
- }
-
- if (!strcmp(option, "dport")) {
- if (sscanf(value, "%d", &dport)) {
- continue;
- }
- pr_warn("cannot read value\n");
- goto fail;
- }
-
- if (!strcmp(option, "direction")) {
- if (cmd == ECM_SFE_L2_POLICY_RULE_DEL) {
- pr_err("direction is not allowed in delete command\n");
- goto fail;
- }
-
- if (sscanf(value, "%d", &direction)) {
- if (direction != ECM_SFE_L2_POLICY_RULE_EGRESS
- && direction != ECM_SFE_L2_POLICY_RULE_INGRESS
- && direction != ECM_SFE_L2_POLICY_RULE_EGRESS_INGRESS) {
-
- pr_err("invalid direction: %d\n", direction);
- goto fail;
- }
- continue;
- }
- pr_warn("cannot read value\n");
- goto fail;
- }
-
- if (!strcmp(option, "sip")) {
- if (ip_ver == 4) {
- if (!in4_pton(value, -1, (uint8_t *)&sip_addr[0], -1, NULL)) {
- pr_err("invalid source IP V4 value: %s\n", value);
- goto fail;
- }
- } else if (ip_ver ==6) {
- if (!in6_pton(value, -1, (uint8_t *)sip_addr, -1, NULL)) {
- pr_err("invalid source IP V6 value: %s\n", value);
- goto fail;
- }
- } else {
- pr_err("ip_ver hasn't been set yet\n");
- goto fail;
- }
- continue;
- }
-
- if (!strcmp(option, "dip")) {
- if (ip_ver == 4) {
- if (!in4_pton(value, -1, (uint8_t *)&dip_addr[0], -1, NULL)) {
- pr_err("invalid destination IP V4 value: %s\n", value);
- goto fail;
- }
- } else if (ip_ver == 6) {
- if (!in6_pton(value, -1, (uint8_t *)dip_addr, -1, NULL)) {
- pr_err("invalid destination IP V6 value: %s\n", value);
- goto fail;
- }
- } else {
- pr_err("ip_ver hasn't been set yet\n");
- goto fail;
- }
- continue;
- }
-
- pr_warn("unrecognized option: %s\n", option);
- goto fail;
- }
-
- kfree(cmd_buf);
-
- if (cmd == ECM_SFE_L2_POLICY_RULE_ADD) {
- if (!ecm_sfe_l2_add_policy_rule(ip_ver, sip_addr, sport, dip_addr, dport, protocol, direction)) {
- pr_err("Add policy rule failed\n");
- return -ENOMEM;
- }
- } else if (cmd == ECM_SFE_L2_POLICY_RULE_DEL) {
- if (!ecm_sfe_l2_delete_policy_rule(ip_ver, sip_addr, sport, dip_addr, dport, protocol)) {
- pr_err("Delete policy rule failed\n");
- return -ENOMEM;
- }
- } else if (cmd == ECM_SFE_L2_POLICY_RULE_FLUSH_ALL) {
- ecm_sfe_l2_flush_policy_rules();
- }
-
- return count;
-fail:
- kfree(cmd_buf);
- return -EINVAL;
-}
-
-/*
- * ecm_sfe_l2_policy_rule_seq_show()
- */
-static int ecm_sfe_l2_policy_rule_seq_show(struct seq_file *m, void *v)
-{
- struct ecm_sfe_l2_policy_rule *rule;
-
- rule = list_entry(v, struct ecm_sfe_l2_policy_rule, list);
-
- if (rule->ip_ver == 4) {
- seq_printf(m, "ip_ver: %d"
- "\tprotocol: %d"
- "\tsip_addr: %pI4"
- "\tdip_addr: %pI4"
- "\tsport: %d"
- "\tdport: %d"
- "\tdirection: %d\n",
- rule->ip_ver,
- rule->protocol,
- &rule->src_addr[0],
- &rule->dest_addr[0],
- rule->src_port,
- rule->dest_port,
- rule->direction);
- } else {
- struct in6_addr saddr;
- struct in6_addr daddr;
-
- memcpy(&saddr.s6_addr32, rule->src_addr, sizeof(uint32_t) * 4);
- memcpy(&daddr.s6_addr32, rule->dest_addr, sizeof(uint32_t) * 4);
-
- seq_printf(m, "ip_ver: %d"
- "\tprotocol: %d"
- "\tsip_addr: %pI6"
- "\tdip_addr: %pI6"
- "\tsport: %d"
- "\tdport: %d"
- "\tdirection: %d\n",
- rule->ip_ver,
- rule->protocol,
- &saddr,
- &daddr,
- rule->src_port,
- rule->dest_port,
- rule->direction);
- }
-
- return 0;
-}
-
-/*
- * ecm_sfe_l2_policy_rule_seq_stop()
- */
-static void ecm_sfe_l2_policy_rule_seq_stop(struct seq_file *p, void *v)
-{
- spin_unlock_bh(&ecm_sfe_l2_policy_rules_lock);
-}
-
-/*
- * ecm_sfe_l2_policy_rule_seq_next()
- */
-static void *ecm_sfe_l2_policy_rule_seq_next(struct seq_file *p, void *v,
- loff_t *pos)
-{
- return seq_list_next(v, &ecm_sfe_l2_policy_rules, pos);
-}
-
-/*
- * ecm_sfe_l2_policy_rule_seq_start()
- */
-static void *ecm_sfe_l2_policy_rule_seq_start(struct seq_file *m, loff_t *_pos)
-{
- spin_lock_bh(&ecm_sfe_l2_policy_rules_lock);
- return seq_list_start(&ecm_sfe_l2_policy_rules, *_pos);
-}
-
-static const struct seq_operations ecm_sfe_l2_policy_rule_seq_ops = {
- .start = ecm_sfe_l2_policy_rule_seq_start,
- .next = ecm_sfe_l2_policy_rule_seq_next,
- .stop = ecm_sfe_l2_policy_rule_seq_stop,
- .show = ecm_sfe_l2_policy_rule_seq_show,
-};
-
-/*
- * ecm_sfe_l2_policy_rule_open()
- */
-static int ecm_sfe_l2_policy_rule_open(struct inode *inode, struct file *file)
-{
- return seq_open(file, &ecm_sfe_l2_policy_rule_seq_ops);
-}
-
-/*
- * File operations for policy rules add/delete/list operations.
- */
-static const struct file_operations ecm_sfe_l2_policy_rule_fops = {
- .owner = THIS_MODULE,
- .open = ecm_sfe_l2_policy_rule_open,
- .write = ecm_sfe_l2_policy_rule_write,
- .read = seq_read,
- .llseek = seq_lseek,
- .release = seq_release,
-};
-
-/*
- * ecm_sfe_l2_defunct_by_5tuple_write()
- * Writes the defunct by 5-tuple command to the debugfs node.
- */
-static ssize_t ecm_sfe_l2_defunct_by_5tuple_write(struct file *f, const char *user_buf,
- size_t count, loff_t *offset)
-{
- int ret = -EINVAL;
- char *cmd_buf;
- int field_count;
- char *fields_ptr;
- char *fields[ECM_SFE_L2_DEFUNCT_BY_5TUPLE_OPTION_MAX];
- char *option, *value;
- int ip_ver;
- uint32_t sip_addr_v4;
- uint32_t dip_addr_v4;
- struct in6_addr sip_addr_v6;
- struct in6_addr dip_addr_v6;
- int sport, dport;
- int protocol;
- bool defunct_result;
-
- /*
- * Command is formed as for IPv4 and IPv6 5-tuples as below respectively.
- *
- * echo "ip_ver=4 sip=192.168.1.100 sport=443 dip=192.168.2.100 dport=1000 protocol=6" > /sys/kernel/debug/ecm_sfe_l2/defunct_by_5tuple
- * echo “ip_ver=6 sip=2aaa::100 sport=443 dip=3bbb::200 dport=1000 protocol=6” > /sys/kernel/debug/ecm_sfe_l2/defunct_by_5tuple
- *
- * The order of the options MUST be as above and it MUST contain all the 5-tuple fields and the ip_ver.
- */
- cmd_buf = kzalloc(count + 1, GFP_ATOMIC);
- if (!cmd_buf) {
- pr_warn("unable to allocate memory for cmd buffer\n");
- return -ENOMEM;
- }
-
- count = simple_write_to_buffer(cmd_buf, count, offset, user_buf, count);
-
- /*
- * Split the buffer into its fields
- */
- field_count = 0;
- fields_ptr = cmd_buf;
- fields[field_count] = strsep(&fields_ptr, " ");
- while (fields[field_count] != NULL) {
- pr_info("Field %d: %s\n", field_count, fields[field_count]);
- field_count++;
- if (field_count == ECM_SFE_L2_DEFUNCT_BY_5TUPLE_OPTION_MAX)
- break;
-
- fields[field_count] = strsep(&fields_ptr, " ");
- }
-
- if (field_count != ECM_SFE_L2_DEFUNCT_BY_5TUPLE_OPTION_MAX) {
- kfree(cmd_buf);
- pr_err("Invalid field count %d\n", field_count);
- return -EINVAL;
- }
-
- /*
- * IP version (ip_ver) field validation.
- */
- option = strsep(&fields[ECM_SFE_L2_DEFUNCT_BY_5TUPLE_OPTION_IP_VERSION], "=");
- if (!option || strcmp(option, "ip_ver")) {
- pr_err("invalid IP version option name: %s\n", option);
- goto fail;
- }
- value = fields[ECM_SFE_L2_DEFUNCT_BY_5TUPLE_OPTION_IP_VERSION];
- if (!sscanf(value, "%d", &ip_ver)) {
- pr_err("Unable to read IP version value %s\n", value);
- goto fail;
- }
- if (ip_ver != 4 && ip_ver != 6) {
- pr_err("invalid IP version: %d\n", ip_ver);
- goto fail;
- }
-
- /*
- * Source IP (sip) field validation.
- */
- option = strsep(&fields[ECM_SFE_L2_DEFUNCT_BY_5TUPLE_OPTION_SIP], "=");
- if (!option || strcmp(option, "sip")) {
- pr_err("invalid source IP option name: %s\n", option);
- goto fail;
- }
- value = fields[ECM_SFE_L2_DEFUNCT_BY_5TUPLE_OPTION_SIP];
-
- if (ip_ver == 4) {
- if (!in4_pton(value, -1, (uint8_t *)&sip_addr_v4, -1, NULL)) {
- pr_err("invalid source IP V4 value: %s\n", value);
- goto fail;
- }
- } else {
- if (!in6_pton(value, -1, (uint8_t *)sip_addr_v6.s6_addr, -1, NULL)) {
- pr_err("invalid source IP V6 value: %s\n", value);
- goto fail;
- }
- }
-
- /*
- * Source port (sport) field validadtion.
- */
- option = strsep(&fields[ECM_SFE_L2_DEFUNCT_BY_5TUPLE_OPTION_SPORT], "=");
- if (!option || strcmp(option, "sport")) {
- pr_err("invalid source port option name: %s\n", option);
- goto fail;
- }
- value = fields[ECM_SFE_L2_DEFUNCT_BY_5TUPLE_OPTION_SPORT];
- if (!sscanf(value, "%d", &sport)) {
- pr_err("Unable to read source port value %s\n", value);
- goto fail;
- }
-
- /*
- * Destination IP (dip) field validation.
- */
- option = strsep(&fields[ECM_SFE_L2_DEFUNCT_BY_5TUPLE_OPTION_DIP], "=");
- if (!option || strcmp(option, "dip")) {
- pr_err("invalid destination IP option name: %s\n", option);
- goto fail;
- }
- value = fields[ECM_SFE_L2_DEFUNCT_BY_5TUPLE_OPTION_DIP];
-
- if (ip_ver == 4) {
- if (!in4_pton(value, -1, (uint8_t *)&dip_addr_v4, -1, NULL)) {
- pr_err("invalid destination IP V4 value: %s\n", value);
- goto fail;
- }
- } else {
- if (!in6_pton(value, -1, (uint8_t *)dip_addr_v6.s6_addr, -1, NULL)) {
- pr_err("invalid destination IP V6 value: %s\n", value);
- goto fail;
- }
- }
-
- /*
- * Destination port (dport) field validadtion.
- */
- option = strsep(&fields[ECM_SFE_L2_DEFUNCT_BY_5TUPLE_OPTION_DPORT], "=");
- if (!option || strcmp(option, "dport")) {
- pr_err("invalid destination port option name: %s\n", option);
- goto fail;
- }
- value = fields[ECM_SFE_L2_DEFUNCT_BY_5TUPLE_OPTION_DPORT];
- if (!sscanf(value, "%d", &dport)) {
- pr_err("Unable to read destination port value %s\n", value);
- goto fail;
- }
-
- /*
- * Protocol field validadtion.
- */
- option = strsep(&fields[ECM_SFE_L2_DEFUNCT_BY_5TUPLE_OPTION_PROTOCOL], "=");
- if (!option || strcmp(option, "protocol")) {
- pr_err("invalid protocol option name: %s\n", option);
- goto fail;
- }
- value = fields[ECM_SFE_L2_DEFUNCT_BY_5TUPLE_OPTION_PROTOCOL];
- if (!sscanf(value, "%d", &protocol)) {
- pr_err("Unable to read protocol value %s\n", value);
- goto fail;
- }
-
- /*
- * Call 5-tuple defunct functions.
- */
- if (ip_ver == 4) {
- pr_debug("sip: %pI4 sport: %d dip: %pI4 dport: %d protocol: %d\n", &sip_addr_v4, sport, &dip_addr_v4, dport, protocol);
- defunct_result = ecm_sfe_common_defunct_ipv4_connection(sip_addr_v4, htons(sport), dip_addr_v4, htons(dport), protocol);
- } else {
- pr_debug("sip: %pI6 sport: %d dip: %pI6 dport: %d protocol: %d\n", &sip_addr_v6, sport, &dip_addr_v6, dport, protocol);
- defunct_result = ecm_sfe_common_defunct_ipv6_connection(&sip_addr_v6, htons(sport), &dip_addr_v6, htons(dport), protocol);
- }
-
- if (!defunct_result) {
- pr_warn("No connection found with this 5-tuple\n");
- }
-
- ret = count;
-fail:
- kfree(cmd_buf);
-
- return ret;
-}
-
-/*
- * File operations for defunct by 5-tuple operations.
- */
-static struct file_operations ecm_sfe_l2_defunct_by_5tuple_fops = {
- .owner = THIS_MODULE,
- .write = ecm_sfe_l2_defunct_by_5tuple_write,
-};
-
-/*
- * ecm_sfe_l2_defunct_by_port_write()
- * Writes the defunct by port command to the debugfs node.
- */
-static ssize_t ecm_sfe_l2_defunct_by_port_write(struct file *f, const char *user_buf,
- size_t count, loff_t *offset)
-{
- char *cmd_buf;
- char *fields;
- char *option, *value;
- int port;
- int direction;
-
- /*
- * Command is formed as:
- *
- * echo “sport=443” > /sys/kernel/debug/ecm_sfe_l2/defunct_by_port
- * echo “dport=443” > /sys/kernel/debug/ecm_sfe_l2/defunct_by_port
- */
- cmd_buf = kzalloc(count + 1, GFP_ATOMIC);
- if (!cmd_buf) {
- pr_warn("unable to allocate memory for cmd buffer\n");
- return -ENOMEM;
- }
-
- count = simple_write_to_buffer(cmd_buf, count, offset, user_buf, count);
-
- /*
- * Split the buffer into its fields
- */
- fields = cmd_buf;
- option = strsep(&fields, "=");
- if (!strcmp(option, "sport")) {
- direction = 0;
- } else if (!strcmp(option, "dport")) {
- direction = 1;
- } else {
- pr_err("invalid option name: %s\n", option);
- kfree(cmd_buf);
- return -EINVAL;
- }
-
- value = fields;
- if (!sscanf(value, "%d", &port)) {
- pr_err("Unable to read port value %s\n", value);
- kfree(cmd_buf);
- return -EINVAL;
- }
- pr_debug("option: %s value: %d\n", option, port);
-
- kfree(cmd_buf);
-
- /*
- * Call port based defunct function.
- */
- ecm_sfe_common_defunct_by_port(port, direction, wan_name);
-
- return count;
-}
-
-/*
- * File operations for defunct by port operations.
- */
-static struct file_operations ecm_sfe_l2_defunct_by_port_fops = {
- .owner = THIS_MODULE,
- .write = ecm_sfe_l2_defunct_by_port_write,
-};
-
-/*
- * ecm_sfe_l2_defunct_by_protocol_write()
- * Writes the defunct by protocol command to the debugfs node.
- */
-static ssize_t ecm_sfe_l2_defunct_by_protocol_write(struct file *f, const char *user_buf,
- size_t count, loff_t *offset)
-{
- char *cmd_buf;
- char *fields;
- char *option, *value;
- int protocol;
-
- /*
- * Command is formed as:
- *
- * echo “protocol=6” > /sys/kernel/debug/ecm_sfe_l2/defunct_by_protocol
- */
- cmd_buf = kzalloc(count + 1, GFP_ATOMIC);
- if (!cmd_buf) {
- pr_warn("unable to allocate memory for cmd buffer\n");
- return -ENOMEM;
- }
-
- count = simple_write_to_buffer(cmd_buf, count, offset, user_buf, count);
-
- /*
- * Split the buffer into its fields
- */
- fields = cmd_buf;
- option = strsep(&fields, "=");
- if (strcmp(option, "protocol")) {
- pr_err("invalid option name: %s\n", option);
- kfree(cmd_buf);
- return -EINVAL;
- }
-
- value = fields;
- if (!sscanf(value, "%d", &protocol)) {
- pr_err("Unable to read protocol value %s\n", value);
- kfree(cmd_buf);
- return -EINVAL;
- }
- pr_debug("option: %s value: %d\n", option, protocol);
-
- kfree(cmd_buf);
-
- /*
- * Defunct the connections which has this protocol number.
- */
- ecm_sfe_common_defunct_by_protocol(protocol);
-
- return count;
-}
-
-/*
- * File operations for defunct by protocol operations.
- */
-static struct file_operations ecm_sfe_l2_defunct_by_protocol_fops = {
- .owner = THIS_MODULE,
- .write = ecm_sfe_l2_defunct_by_protocol_write,
-};
-
-/*
- * ecm_sfe_l2_wan_name_read()
- * Reads the WAN interface name from the debugfs node wan_name
- */
-static ssize_t ecm_sfe_l2_wan_name_read(struct file *f, char *buffer,
- size_t len, loff_t *offset)
-{
- return simple_read_from_buffer(buffer, len, offset, wan_name, wan_name_len);
-}
-
-/*
- * ecm_sfe_l2_wan_name_write()
- * Writes the WAN interface name to the debugfs node wan_name
- */
-static ssize_t ecm_sfe_l2_wan_name_write(struct file *f, const char *buffer,
- size_t len, loff_t *offset)
-{
- ssize_t ret;
-
- if (len > IFNAMSIZ) {
- pr_err("WAN interface name is too long\n");
- return -EINVAL;
- }
-
- ret = simple_write_to_buffer(wan_name, IFNAMSIZ, offset, buffer, len);
- if (ret < 0) {
- pr_err("WAN interface name cannot be written\n");
- return ret;
- }
-
- wan_name[ret - 1] = '\0';
- wan_name_len = ret;
-
- return ret;
-}
-
-/*
- * File operations for wan interface name.
- */
-static struct file_operations ecm_sfe_l2_wan_name_fops = {
- .owner = THIS_MODULE,
- .write = ecm_sfe_l2_wan_name_write,
- .read = ecm_sfe_l2_wan_name_read,
-};
-
-struct ecm_sfe_common_callbacks sfe_cbs = {
- .l2_accel_check = ecm_sfe_l2_accel_check_callback, /**< Callback to decide if L2 acceleration is wanted for the flow. */
-};
-
-/*
- * ecm_sfe_l2_init()
- */
-static int __init ecm_sfe_l2_init(void)
-{
- pr_debug("ECM SFE L2 module INIT\n");
-
- /*
- * Create entries in DebugFS for control functions
- */
- ecm_sfe_l2_dentry = debugfs_create_dir("ecm_sfe_l2", NULL);
- if (!ecm_sfe_l2_dentry) {
- pr_info("Failed to create SFE L2 directory entry\n");
- return -1;
- }
-
- if (!debugfs_create_file("wan_name", S_IWUSR, ecm_sfe_l2_dentry,
- NULL, &ecm_sfe_l2_wan_name_fops)) {
- pr_debug("Failed to create ecm wan interface file in debugfs\n");
- debugfs_remove_recursive(ecm_sfe_l2_dentry);
- return -1;
- }
-
- if (!debugfs_create_file("policy_rules", S_IWUSR, ecm_sfe_l2_dentry,
- NULL, &ecm_sfe_l2_policy_rule_fops)) {
- pr_debug("Failed to create ecm SFE L2 policy rules file in debugfs\n");
- debugfs_remove_recursive(ecm_sfe_l2_dentry);
- return -1;
- }
-
- if (!debugfs_create_file("defunct_by_protocol", S_IWUSR, ecm_sfe_l2_dentry,
- NULL, &ecm_sfe_l2_defunct_by_protocol_fops)) {
- pr_debug("Failed to create ecm defunct by protocol file in debugfs\n");
- debugfs_remove_recursive(ecm_sfe_l2_dentry);
- return -1;
- }
-
- if (!debugfs_create_file("defunct_by_5tuple", S_IWUSR, ecm_sfe_l2_dentry,
- NULL, &ecm_sfe_l2_defunct_by_5tuple_fops)) {
- pr_debug("Failed to create ecm defunct by 5tuple file in debugfs\n");
- debugfs_remove_recursive(ecm_sfe_l2_dentry);
- return -1;
- }
-
- if (!debugfs_create_file("defunct_by_port", S_IWUSR, ecm_sfe_l2_dentry,
- NULL, &ecm_sfe_l2_defunct_by_port_fops)) {
- pr_debug("Failed to create ecm defunct by port file in debugfs\n");
- debugfs_remove_recursive(ecm_sfe_l2_dentry);
- return -1;
- }
-
- if (ecm_sfe_common_callbacks_register(&sfe_cbs)) {
- pr_debug("Failed to register callbacks\n");
- debugfs_remove_recursive(ecm_sfe_l2_dentry);
- return -1;
- }
- return 0;
-}
-
-/*
- * ecm_sfe_l2_exit()
- */
-static void __exit ecm_sfe_l2_exit(void)
-{
- pr_debug("ECM SFE L2 check module EXIT\n");
-
- ecm_sfe_common_callbacks_unregister();
-
- /*
- * Remove the debugfs files recursively.
- */
- debugfs_remove_recursive(ecm_sfe_l2_dentry);
-}
-
-module_init(ecm_sfe_l2_init)
-module_exit(ecm_sfe_l2_exit)
-
-MODULE_DESCRIPTION("ECM SFE L2 Module");
-#ifdef MODULE_LICENSE
-MODULE_LICENSE("Dual BSD/GPL");
-#endif
diff --git a/qca-nss-ecm/exports/ecm_sfe_common_public.h b/qca-nss-ecm/exports/ecm_sfe_common_public.h
deleted file mode 100644
index 41f6b6e..0000000
--- a/qca-nss-ecm/exports/ecm_sfe_common_public.h
+++ /dev/null
@@ -1,136 +0,0 @@
-/*
- **************************************************************************
- * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
- *
- * Permission to use, copy, modify, and/or distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
-
- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
- **************************************************************************
- */
-
-/**
- * @file ecm_fe_common_public.h
- * ECM SFE frontend public APIs and data structures.
- */
-
-#ifndef __ECM_SFE_COMMON_PUBLIC_H__
-#define __ECM_SFE_COMMON_PUBLIC_H__
-
-/**
- * @addtogroup ecm_sfe_common_subsystem
- * @{
- */
-
-#define ECM_SFE_COMMON_FLOW_L2_ACCEL_ALLOWED (1 << 0) /**< L2 acceleration is allowed on the flow interface. */
-#define ECM_SFE_COMMON_RETURN_L2_ACCEL_ALLOWED (1 << 1) /**< L2 acceleration is allowed on the return interface. */
-
-/**
- * SFE common 5-tuple for external use.
- */
-struct ecm_sfe_common_tuple {
- uint32_t src_addr[4]; /**< Source IP in host order. */
- uint32_t dest_addr[4]; /**< Destination IP in host order. */
-
- uint16_t src_port; /**< Source port port in host order. */
- uint16_t dest_port; /**< Destination port in host order. */
- uint32_t src_ifindex; /**< Source L2 interface index */
- uint32_t dest_ifindex; /**< Destination L2 interface index */
- uint8_t protocol; /**< Next protocol header number. */
- uint8_t ip_ver; /**< IP version 4 or 6. */
-};
-
-/**
- * Callback to which SFE clients will register and return bitmap of values that indicate L2 acceleration for each direction.
- */
-typedef uint32_t (*ecm_sfe_common_l2_accel_check_callback_t)(struct ecm_sfe_common_tuple *tuple);
-
-/**
- * Data structure for SFE common callbacks.
- */
-struct ecm_sfe_common_callbacks {
- ecm_sfe_common_l2_accel_check_callback_t l2_accel_check; /**< Callback to decide if L2 acceleration is wanted for the flow. */
-};
-
-/**
- * Defuncts an IPv4 5-tuple connection.
- *
- * @param src_ip The source IP address.
- * @param src_port The source port.
- * @param dest_ip The destination IP address.
- * @param dest_port The destination port.
- * @param protocol The protocol.
- *
- * @return
- * True if defuncted; false if not.
- */
-bool ecm_sfe_common_defunct_ipv4_connection(__be32 src_ip, int src_port,
- __be32 dest_ip, int dest_port, int protocol);
-
-/**
- * Defuncts an IPv6 5-tuple connection.
- *
- * @param src_ip The source IP address.
- * @param src_port The source port.
- * @param dest_ip The destination IP address.
- * @param dest_port The destination port.
- * @param protocol The protocol.
- *
- * @return
- * True if defuncted; false if not.
- */
-bool ecm_sfe_common_defunct_ipv6_connection(struct in6_addr *src_ip, int src_port,
- struct in6_addr *dest_ip, int dest_port, int protocol);
-
-/**
- * Defuncts all the connections with this protocol type.
- *
- * @param protocol Protocol type.
- *
- * @return
- * None.
- */
-void ecm_sfe_common_defunct_by_protocol(int protocol);
-
-/**
- * Defuncts all the connections with this port number in the correct direction.
- *
- * @param port The port number.
- * @param direction The direction of the port (source (1) or destination (2))
- * @param wan_name The WAN port interface name.
- *
- * @return
- * None.
- */
-void ecm_sfe_common_defunct_by_port(int port, int direction, char *wan_name);
-
-/**
- * Registers a client for SFE common callbacks.
- *
- * @param sfe_cb SFE common callback pointer.
- *
- * @return
- * 0 if success, error value if fails.
- */
-int ecm_sfe_common_callbacks_register(struct ecm_sfe_common_callbacks *sfe_cb);
-
-/**
- * Unregisters a client from SFE common callbacks.
- *
- * @return
- * None.
- */
-void ecm_sfe_common_callbacks_unregister(void);
-
-/**
- * @}
- */
-
-#endif /* __ECM_SFE_COMMON_PUBLIC_H__ */
diff --git a/qca-nss-ecm/frontends/cmn/ecm_non_ported_ipv4.c b/qca-nss-ecm/frontends/cmn/ecm_non_ported_ipv4.c
index c28f770..20f7d0d 100644
--- a/qca-nss-ecm/frontends/cmn/ecm_non_ported_ipv4.c
+++ b/qca-nss-ecm/frontends/cmn/ecm_non_ported_ipv4.c
@@ -295,7 +295,7 @@
case ECM_AE_CLASSIFIER_RESULT_NSS:
if (!ecm_nss_feature_check(skb, ip_hdr)) {
DEBUG_WARN("Unsupported feature found for NSS acceleration\n");
- return NF_ACCEPT;
+ goto fail_1;
}
defunct_callback = ecm_nss_non_ported_ipv4_connection_defunct_callback;
feci = (struct ecm_front_end_connection_instance *)ecm_nss_non_ported_ipv4_connection_instance_alloc(can_accel, protocol, &nci);
diff --git a/qca-nss-ecm/frontends/cmn/ecm_non_ported_ipv6.c b/qca-nss-ecm/frontends/cmn/ecm_non_ported_ipv6.c
index ba50952..7658828 100644
--- a/qca-nss-ecm/frontends/cmn/ecm_non_ported_ipv6.c
+++ b/qca-nss-ecm/frontends/cmn/ecm_non_ported_ipv6.c
@@ -294,7 +294,7 @@
case ECM_AE_CLASSIFIER_RESULT_NSS:
if (!ecm_nss_feature_check(skb, ip_hdr)) {
DEBUG_WARN("Unsupported feature found for NSS acceleration\n");
- return NF_ACCEPT;
+ goto fail_1;
}
defunct_callback = ecm_nss_non_ported_ipv6_connection_defunct_callback;
feci = (struct ecm_front_end_connection_instance *)ecm_nss_non_ported_ipv6_connection_instance_alloc(can_accel, protocol, &nci);
diff --git a/qca-nss-ecm/frontends/ecm_front_end_common.c b/qca-nss-ecm/frontends/ecm_front_end_common.c
index f6b5300..40e6abe 100644
--- a/qca-nss-ecm/frontends/ecm_front_end_common.c
+++ b/qca-nss-ecm/frontends/ecm_front_end_common.c
@@ -84,7 +84,7 @@
ECM_FE_FEATURE_SFE | ECM_FE_FEATURE_NON_PORTED | ECM_FE_FEATURE_CONN_LIMIT | /* SFE type */
ECM_FE_FEATURE_OVS_BRIDGE | ECM_FE_FEATURE_OVS_VLAN | ECM_FE_FEATURE_BRIDGE |
- ECM_FE_FEATURE_BONDING | ECM_FE_FEATURE_SRC_IF_CHECK,
+ ECM_FE_FEATURE_BONDING,
ECM_FE_FEATURE_NSS | ECM_FE_FEATURE_SFE | ECM_FE_FEATURE_NON_PORTED | ECM_FE_FEATURE_BRIDGE |
ECM_FE_FEATURE_MULTICAST | ECM_FE_FEATURE_BONDING | ECM_FE_FEATURE_IGS |
@@ -612,9 +612,6 @@
* Unregister sysctl table.
*/
if (ecm_front_end_ctl_tbl_hdr) {
-#ifdef ECM_FRONT_END_SFE_ENABLE
- ecm_sfe_sysctl_tbl_exit();
-#endif
unregister_sysctl_table(ecm_front_end_ctl_tbl_hdr);
}
}
diff --git a/qca-nss-ecm/frontends/include/ecm_front_end_common.h b/qca-nss-ecm/frontends/include/ecm_front_end_common.h
index 0c70a1d..46c50d8 100644
--- a/qca-nss-ecm/frontends/include/ecm_front_end_common.h
+++ b/qca-nss-ecm/frontends/include/ecm_front_end_common.h
@@ -312,6 +312,6 @@
void ecm_front_end_common_sysctl_register(void);
void ecm_front_end_common_sysctl_unregister(void);
int ecm_sfe_sysctl_tbl_init(void);
-void ecm_sfe_sysctl_tbl_exit(void);
#endif /* __ECM_FRONT_END_COMMON_H */
+
diff --git a/qca-nss-ecm/frontends/include/ecm_front_end_types.h b/qca-nss-ecm/frontends/include/ecm_front_end_types.h
index 2dcf3c1..f7e2de1 100644
--- a/qca-nss-ecm/frontends/include/ecm_front_end_types.h
+++ b/qca-nss-ecm/frontends/include/ecm_front_end_types.h
@@ -108,11 +108,10 @@
* An acceleration mode less than zero indicates a connection that cannot be accelerated, maybe due to error.
*/
enum ecm_front_end_acceleration_modes {
- ECM_FRONT_END_ACCELERATION_MODE_FAIL_DEFUNCT_SHORT = -8,/* Acceleration has failed for a short time due to the connection has become defunct and waiting for the removal */
ECM_FRONT_END_ACCELERATION_MODE_FAIL_DEFUNCT = -7, /* Acceleration has permanently failed due to the connection has become defunct */
ECM_FRONT_END_ACCELERATION_MODE_FAIL_DECEL = -6, /* Acceleration has permanently failed due to deceleration malfunction */
ECM_FRONT_END_ACCELERATION_MODE_FAIL_NO_ACTION = -5, /* Acceleration has permanently failed due to too many offloads that were rejected without any packets being offloaded */
- ECM_FRONT_END_ACCELERATION_MODE_FAIL_ACCEL_ENGINE = -4, /* Acceleration has permanently failed due to too many accel engine NAK's */
+ ECM_FRONT_END_ACCELERATION_MODE_FAIL_ACCEL_ENGINE = -4, /* Acceleration has permanently failed due to too many accel engine NAK's */
ECM_FRONT_END_ACCELERATION_MODE_FAIL_DRIVER = -3, /* Acceleration has permanently failed due to too many driver interaction failures */
ECM_FRONT_END_ACCELERATION_MODE_FAIL_RULE = -2, /* Acceleration has permanently failed due to bad rule data */
ECM_FRONT_END_ACCELERATION_MODE_FAIL_DENIED = -1, /* Acceleration has permanently failed due to can_accel denying accel */
diff --git a/qca-nss-ecm/frontends/nss/ecm_nss_multicast_ipv4.c b/qca-nss-ecm/frontends/nss/ecm_nss_multicast_ipv4.c
index 7591331..405c5f5 100644
--- a/qca-nss-ecm/frontends/nss/ecm_nss_multicast_ipv4.c
+++ b/qca-nss-ecm/frontends/nss/ecm_nss_multicast_ipv4.c
@@ -1584,7 +1584,7 @@
* If connection became defunct then set mode so that no further accel/decel attempts occur.
*/
if (feci->is_defunct) {
- feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DEFUNCT_SHORT;
+ feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DEFUNCT;
}
spin_unlock_bh(&feci->lock);
diff --git a/qca-nss-ecm/frontends/nss/ecm_nss_multicast_ipv6.c b/qca-nss-ecm/frontends/nss/ecm_nss_multicast_ipv6.c
index 91d3a9d..29a490c 100644
--- a/qca-nss-ecm/frontends/nss/ecm_nss_multicast_ipv6.c
+++ b/qca-nss-ecm/frontends/nss/ecm_nss_multicast_ipv6.c
@@ -1539,7 +1539,7 @@
* If connection became defunct then set mode so that no further accel/decel attempts occur.
*/
if (feci->is_defunct) {
- feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DEFUNCT_SHORT;
+ feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DEFUNCT;
}
spin_unlock_bh(&feci->lock);
diff --git a/qca-nss-ecm/frontends/nss/ecm_nss_non_ported_ipv4.c b/qca-nss-ecm/frontends/nss/ecm_nss_non_ported_ipv4.c
index ec4f365..927355c 100644
--- a/qca-nss-ecm/frontends/nss/ecm_nss_non_ported_ipv4.c
+++ b/qca-nss-ecm/frontends/nss/ecm_nss_non_ported_ipv4.c
@@ -1460,7 +1460,7 @@
* If connection became defunct then set mode so that no further accel/decel attempts occur.
*/
if (feci->is_defunct) {
- feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DEFUNCT_SHORT;
+ feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DEFUNCT;
}
spin_unlock_bh(&feci->lock);
diff --git a/qca-nss-ecm/frontends/nss/ecm_nss_non_ported_ipv6.c b/qca-nss-ecm/frontends/nss/ecm_nss_non_ported_ipv6.c
index 13e0cc5..086d108 100644
--- a/qca-nss-ecm/frontends/nss/ecm_nss_non_ported_ipv6.c
+++ b/qca-nss-ecm/frontends/nss/ecm_nss_non_ported_ipv6.c
@@ -1321,7 +1321,7 @@
* If connection became defunct then set mode so that no further accel/decel attempts occur.
*/
if (feci->is_defunct) {
- feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DEFUNCT_SHORT;
+ feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DEFUNCT;
}
spin_unlock_bh(&feci->lock);
diff --git a/qca-nss-ecm/frontends/nss/ecm_nss_ported_ipv4.c b/qca-nss-ecm/frontends/nss/ecm_nss_ported_ipv4.c
index 6375b6b..8d87036 100644
--- a/qca-nss-ecm/frontends/nss/ecm_nss_ported_ipv4.c
+++ b/qca-nss-ecm/frontends/nss/ecm_nss_ported_ipv4.c
@@ -1557,7 +1557,7 @@
* If connection became defunct then set mode so that no further accel/decel attempts occur.
*/
if (feci->is_defunct) {
- feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DEFUNCT_SHORT;
+ feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DEFUNCT;
}
spin_unlock_bh(&feci->lock);
diff --git a/qca-nss-ecm/frontends/nss/ecm_nss_ported_ipv6.c b/qca-nss-ecm/frontends/nss/ecm_nss_ported_ipv6.c
index 1be8a58..886149f 100644
--- a/qca-nss-ecm/frontends/nss/ecm_nss_ported_ipv6.c
+++ b/qca-nss-ecm/frontends/nss/ecm_nss_ported_ipv6.c
@@ -1467,7 +1467,7 @@
* If connection became defunct then set mode so that no further accel/decel attempts occur.
*/
if (feci->is_defunct) {
- feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DEFUNCT_SHORT;
+ feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DEFUNCT;
}
spin_unlock_bh(&feci->lock);
diff --git a/qca-nss-ecm/frontends/sfe/ecm_sfe_common.c b/qca-nss-ecm/frontends/sfe/ecm_sfe_common.c
index 6cb1473..f7aee0d 100644
--- a/qca-nss-ecm/frontends/sfe/ecm_sfe_common.c
+++ b/qca-nss-ecm/frontends/sfe/ecm_sfe_common.c
@@ -41,18 +41,6 @@
#include "ecm_sfe_ipv4.h"
#include "ecm_sfe_ipv6.h"
#include "ecm_sfe_common.h"
-#include "exports/ecm_sfe_common_public.h"
-
-
-/*
- * Callback object to support SFE frontend interaction with external code
- */
-struct ecm_sfe_common_callbacks ecm_sfe_cb;
-
-/*
- * Sysctl table
- */
-static struct ctl_table_header *ecm_sfe_ctl_tbl_hdr;
static bool ecm_sfe_fast_xmit_enable = true;
@@ -290,8 +278,7 @@
*/
int ecm_sfe_sysctl_tbl_init()
{
- ecm_sfe_ctl_tbl_hdr = register_sysctl(ECM_FRONT_END_SYSCTL_PATH, ecm_sfe_sysctl_tbl);
- if (!ecm_sfe_ctl_tbl_hdr) {
+ if (!register_sysctl(ECM_FRONT_END_SYSCTL_PATH, ecm_sfe_sysctl_tbl)) {
DEBUG_WARN("Unable to register ecm_sfe_sysctl_tbl");
return -EINVAL;
}
@@ -300,17 +287,6 @@
}
/*
- * ecm_sfe_sysctl_tbl_exit()
- * Unregister sysctl for SFE
- */
-void ecm_sfe_sysctl_tbl_exit()
-{
- if (ecm_sfe_ctl_tbl_hdr) {
- unregister_sysctl_table(ecm_sfe_ctl_tbl_hdr);
- }
-}
-
-/*
* ecm_sfe_common_init_fe_info()
* Initialize common fe info
*/
@@ -395,119 +371,3 @@
break;
}
}
-
-/*
- * ecm_sfe_common_tuple_set()
- * Sets the SFE common tuple object with the ECM connection rule paramaters.
- *
- * This tuple object will be used by external module to make decision on L2 acceleration.
- */
-void ecm_sfe_common_tuple_set(struct ecm_front_end_connection_instance *feci,
- int32_t from_iface_id, int32_t to_iface_id,
- struct ecm_sfe_common_tuple *tuple)
-{
- ip_addr_t saddr;
- ip_addr_t daddr;
-
- tuple->protocol = ecm_db_connection_protocol_get(feci->ci);
- tuple->ip_ver = feci->ip_version;
-
- tuple->src_port = ecm_db_connection_port_get(feci->ci, ECM_DB_OBJ_DIR_FROM);
- tuple->dest_port = ecm_db_connection_port_get(feci->ci, ECM_DB_OBJ_DIR_TO);
-
- tuple->src_ifindex = from_iface_id;
- tuple->dest_ifindex = to_iface_id;
-
- ecm_db_connection_address_get(feci->ci, ECM_DB_OBJ_DIR_FROM, saddr);
- ecm_db_connection_address_get(feci->ci, ECM_DB_OBJ_DIR_TO, daddr);
-
- if (feci->ip_version == 4) {
- ECM_IP_ADDR_TO_NIN4_ADDR(tuple->src_addr[0], saddr);
- ECM_IP_ADDR_TO_NIN4_ADDR(tuple->dest_addr[0], daddr);
- } else {
- ECM_IP_ADDR_TO_SFE_IPV6_ADDR(tuple->src_addr, saddr);
- ECM_IP_ADDR_TO_SFE_IPV6_ADDR(tuple->dest_addr, daddr);
- }
-}
-
-/*
- * ecm_sfe_common_defunct_ipv4_connection()
- * Defunct an IPv4 5-tuple connection.
- */
-bool ecm_sfe_common_defunct_ipv4_connection(__be32 src_ip, int src_port,
- __be32 dest_ip, int dest_port, int protocol)
-{
- return ecm_db_connection_decel_v4(src_ip, src_port, dest_ip, dest_port, protocol);
-}
-EXPORT_SYMBOL(ecm_sfe_common_defunct_ipv4_connection);
-
-/*
- * ecm_sfe_common_defunct_ipv6_connection()
- * Defunct an IPv6 5-tuple connection.
- */
-bool ecm_sfe_common_defunct_ipv6_connection(struct in6_addr *src_ip, int src_port,
- struct in6_addr *dest_ip, int dest_port, int protocol)
-{
- return ecm_db_connection_decel_v6(src_ip, src_port, dest_ip, dest_port, protocol);
-}
-EXPORT_SYMBOL(ecm_sfe_common_defunct_ipv6_connection);
-
-/*
- * ecm_sfe_common_defunct_by_protocol()
- * Defunct the connections by the protocol type (e.g:TCP, UDP)
- */
-void ecm_sfe_common_defunct_by_protocol(int protocol)
-{
- ecm_db_connection_defunct_by_protocol(protocol);
-}
-EXPORT_SYMBOL(ecm_sfe_common_defunct_by_protocol);
-
-/*
- * ecm_sfe_common_defunct_by_port()
- * Defunct the connections associated with this port in the direction
- * relative to the ECM's connection direction as well.
- *
- * TODO:
- * For now, all the connections from/to this port number are defuncted.
- * Directional defunct can be implemented later, but there is a trade of here:
- * For each connection in the database, the connection's from/to interfaces will
- * be checked with the wan_name and direction will be determined and then the connection
- * will be defuncted if there is a match with this port number. This process may be heavier
- * than defuncting all the connections from/to this port number. So, the direction and wan_name
- * are optional for this API for now.
- */
-void ecm_sfe_common_defunct_by_port(int port, int direction, char *wan_name)
-{
- ecm_db_connection_defunct_by_port(htons(port), ECM_DB_OBJ_DIR_FROM);
- ecm_db_connection_defunct_by_port(htons(port), ECM_DB_OBJ_DIR_TO);
-}
-EXPORT_SYMBOL(ecm_sfe_common_defunct_by_port);
-
-/*
- * ecm_sfe_common_callbacks_register()
- * Registers SFE common callbacks.
- */
-int ecm_sfe_common_callbacks_register(struct ecm_sfe_common_callbacks *sfe_cb)
-{
- if (!sfe_cb || !sfe_cb->l2_accel_check) {
- DEBUG_ERROR("SFE L2 acceleration check callback is NULL\n");
- return -EINVAL;
- }
-
- rcu_assign_pointer(ecm_sfe_cb.l2_accel_check, sfe_cb->l2_accel_check);
- synchronize_rcu();
-
- return 0;
-}
-EXPORT_SYMBOL(ecm_sfe_common_callbacks_register);
-
-/*
- * ecm_sfe_common_callbacks_unregister()
- * Unregisters SFE common callbacks.
- */
-void ecm_sfe_common_callbacks_unregister(void)
-{
- rcu_assign_pointer(ecm_sfe_cb.l2_accel_check, NULL);
- synchronize_rcu();
-}
-EXPORT_SYMBOL(ecm_sfe_common_callbacks_unregister);
diff --git a/qca-nss-ecm/frontends/sfe/ecm_sfe_common.h b/qca-nss-ecm/frontends/sfe/ecm_sfe_common.h
index 54625f3..cb240f9 100644
--- a/qca-nss-ecm/frontends/sfe/ecm_sfe_common.h
+++ b/qca-nss-ecm/frontends/sfe/ecm_sfe_common.h
@@ -15,13 +15,6 @@
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
-#include "ecm_sfe_common_public.h"
-
-/*
- * Export the callback object for frontend usage.
- */
-extern struct ecm_sfe_common_callbacks ecm_sfe_cb;
-
#ifdef CONFIG_XFRM
/*
* Which type of ipsec process traffic need.
@@ -155,6 +148,3 @@
uint32_t ecm_sfe_common_get_stats_bitmap(struct ecm_sfe_common_fe_info *fe_info, ecm_db_obj_dir_t dir);
void ecm_sfe_common_set_stats_bitmap(struct ecm_sfe_common_fe_info *fe_info, ecm_db_obj_dir_t dir, uint8_t bit);
void ecm_sfe_common_update_rule(struct ecm_front_end_connection_instance *feci, enum ecm_rule_update_type type, void *arg);
-void ecm_sfe_common_tuple_set(struct ecm_front_end_connection_instance *feci,
- int32_t from_iface_id, int32_t to_iface_id,
- struct ecm_sfe_common_tuple *tuple);
diff --git a/qca-nss-ecm/frontends/sfe/ecm_sfe_non_ported_ipv4.c b/qca-nss-ecm/frontends/sfe/ecm_sfe_non_ported_ipv4.c
index ab92a8a..9ebc5d4 100644
--- a/qca-nss-ecm/frontends/sfe/ecm_sfe_non_ported_ipv4.c
+++ b/qca-nss-ecm/frontends/sfe/ecm_sfe_non_ported_ipv4.c
@@ -1101,8 +1101,9 @@
if (ecm_interface_src_check || ecm_db_connection_is_pppoe_bridged_get(feci->ci)) {
DEBUG_INFO("%px: Source interface check flag is enabled\n", nnpci);
- nircm->rule_flags |= SFE_RULE_CREATE_FLAG_FLOW_SRC_INTERFACE_CHECK;
- nircm->rule_flags |= SFE_RULE_CREATE_FLAG_RETURN_SRC_INTERFACE_CHECK;
+ /*
+ * TO DO: No interface check rule create message type defined in SFE's API.
+ */
}
if (pr->process_actions & ECM_CLASSIFIER_PROCESS_ACTION_QOS_TAG) {
@@ -1460,7 +1461,7 @@
* If connection became defunct then set mode so that no further accel/decel attempts occur.
*/
if (feci->is_defunct) {
- feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DEFUNCT_SHORT;
+ feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DEFUNCT;
}
spin_unlock_bh(&feci->lock);
diff --git a/qca-nss-ecm/frontends/sfe/ecm_sfe_non_ported_ipv6.c b/qca-nss-ecm/frontends/sfe/ecm_sfe_non_ported_ipv6.c
index 9c99fae..afc33ce 100644
--- a/qca-nss-ecm/frontends/sfe/ecm_sfe_non_ported_ipv6.c
+++ b/qca-nss-ecm/frontends/sfe/ecm_sfe_non_ported_ipv6.c
@@ -1018,8 +1018,6 @@
*/
if (ecm_interface_src_check || ecm_db_connection_is_pppoe_bridged_get(feci->ci)) {
DEBUG_INFO("%px: Source interface check flag is enabled\n", nnpci);
- nircm->rule_flags |= SFE_RULE_CREATE_FLAG_FLOW_SRC_INTERFACE_CHECK;
- nircm->rule_flags |= SFE_RULE_CREATE_FLAG_RETURN_SRC_INTERFACE_CHECK;
}
/*
@@ -1342,7 +1340,7 @@
* If connection became defunct then set mode so that no further accel/decel attempts occur.
*/
if (feci->is_defunct) {
- feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DEFUNCT_SHORT;
+ feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DEFUNCT;
}
spin_unlock_bh(&feci->lock);
diff --git a/qca-nss-ecm/frontends/sfe/ecm_sfe_ported_ipv4.c b/qca-nss-ecm/frontends/sfe/ecm_sfe_ported_ipv4.c
index c3f5d73..524e118 100644
--- a/qca-nss-ecm/frontends/sfe/ecm_sfe_ported_ipv4.c
+++ b/qca-nss-ecm/frontends/sfe/ecm_sfe_ported_ipv4.c
@@ -165,7 +165,6 @@
struct ecm_front_end_connection_instance *feci;
struct ecm_sfe_ported_ipv4_connection_instance *npci;
ecm_front_end_acceleration_mode_t result_mode;
- bool is_defunct = false;
/*
* Is this a response to a create message?
@@ -328,28 +327,10 @@
DEBUG_INFO("%px: Decelerate was pending\n", ci);
- /*
- * Check if the pending decelerate was done with the defunct process.
- * If it was, set the is_defunct flag of the feci to false for re-try.
- */
- if (feci->is_defunct) {
- is_defunct = feci->is_defunct;
- feci->is_defunct = false;
- }
-
spin_unlock_bh(&ecm_sfe_ipv4_lock);
spin_unlock_bh(&feci->lock);
- /*
- * If the pending decelerate was done through defunct process, we should
- * re-try it here with the same defunct function, because the purpose of that
- * process is to remove the connection from the database as well after decelerating it.
- */
- if (is_defunct) {
- ecm_db_connection_make_defunct(ci);
- } else {
- feci->decelerate(feci);
- }
+ feci->decelerate(feci);
/*
* Release the connection.
@@ -373,7 +354,7 @@
interface_num = msg->conn_rule.flow_interface_num;
}
if (ecm_sfe_common_fast_xmit_check(interface_num)) {
- msg->rule_flags |= SFE_RULE_CREATE_FLAG_RETURN_TRANSMIT_FAST;
+ msg->rule_flags |= SFE_RULE_CREATE_FLAG_FLOW_TRANSMIT_FAST;
}
interface_num = msg->conn_rule.return_top_interface_num;
@@ -381,7 +362,7 @@
interface_num = msg->conn_rule.return_interface_num;
}
if (ecm_sfe_common_fast_xmit_check(interface_num)) {
- msg->rule_flags |= SFE_RULE_CREATE_FLAG_FLOW_TRANSMIT_FAST;
+ msg->rule_flags |= SFE_RULE_CREATE_FLAG_RETURN_TRANSMIT_FAST;
}
rcu_read_unlock_bh();
@@ -421,8 +402,6 @@
uint8_t dest_mac_xlate[ETH_ALEN];
ecm_db_direction_t ecm_dir;
ecm_front_end_acceleration_mode_t result_mode;
- uint32_t l2_accel_bits = (ECM_SFE_COMMON_FLOW_L2_ACCEL_ALLOWED | ECM_SFE_COMMON_RETURN_L2_ACCEL_ALLOWED);
- ecm_sfe_common_l2_accel_check_callback_t l2_accel_check;
DEBUG_CHECK_MAGIC(npci, ECM_SFE_PORTED_IPV4_CONNECTION_INSTANCE_MAGIC, "%px: magic failed", npci);
@@ -509,23 +488,6 @@
nircm->conn_rule.return_interface_num = to_sfe_iface_id;
/*
- * Check which side of the connection can support L2 acceleration.
- * The check is done only for the routed flows and if the L2 feature is enabled.
- */
- if (sfe_is_l2_feature_enabled() && ecm_db_connection_is_routed_get(feci->ci)) {
- rcu_read_lock();
- l2_accel_check = rcu_dereference(ecm_sfe_cb.l2_accel_check);
- if (l2_accel_check) {
- struct ecm_sfe_common_tuple l2_accel_tuple;
-
- ecm_sfe_common_tuple_set(feci, from_sfe_iface_id, to_sfe_iface_id, &l2_accel_tuple);
-
- l2_accel_bits = l2_accel_check(&l2_accel_tuple);
- }
- rcu_read_unlock();
- }
-
- /*
* Set interface numbers involved in accelerating this connection.
* These are the inner facing addresses from the heirarchy interface lists we got above.
*/
@@ -582,7 +544,7 @@
break;
}
- if (ecm_sfe_common_is_l2_iface_supported(ECM_DB_IFACE_TYPE_BRIDGE, list_index, from_ifaces_first) && (l2_accel_bits & ECM_SFE_COMMON_FLOW_L2_ACCEL_ALLOWED)) {
+ if (ecm_sfe_common_is_l2_iface_supported(ECM_DB_IFACE_TYPE_BRIDGE, list_index, from_ifaces_first)) {
nircm->rule_flags |= SFE_RULE_CREATE_FLAG_USE_FLOW_BOTTOM_INTERFACE;
feci->set_stats_bitmap(feci, ECM_DB_OBJ_DIR_FROM, ECM_DB_IFACE_TYPE_BRIDGE);
}
@@ -608,7 +570,7 @@
break;
}
- if (ecm_sfe_common_is_l2_iface_supported(ECM_DB_IFACE_TYPE_OVS_BRIDGE, list_index, from_ifaces_first) && (l2_accel_bits & ECM_SFE_COMMON_FLOW_L2_ACCEL_ALLOWED)) {
+ if (ecm_sfe_common_is_l2_iface_supported(ECM_DB_IFACE_TYPE_OVS_BRIDGE, list_index, from_ifaces_first)) {
nircm->rule_flags |= SFE_RULE_CREATE_FLAG_USE_FLOW_BOTTOM_INTERFACE;
feci->set_stats_bitmap(feci, ECM_DB_OBJ_DIR_FROM, ECM_DB_IFACE_TYPE_OVS_BRIDGE);
}
@@ -661,15 +623,6 @@
break;
}
- /*
- * If external module decide that L2 acceleration is not allowed, we should return
- * without setting PPPoE parameters.
- */
- if (!(l2_accel_bits & ECM_SFE_COMMON_FLOW_L2_ACCEL_ALLOWED)) {
- DEBUG_TRACE("%px: L2 acceleration is not allowed for the PPPoE interface\n", npci);
- break;
- }
-
feci->set_stats_bitmap(feci, ECM_DB_OBJ_DIR_FROM, ECM_DB_IFACE_TYPE_PPPOE);
/*
@@ -723,7 +676,7 @@
}
nircm->valid_flags |= SFE_RULE_CREATE_VLAN_VALID;
- if (sfe_is_l2_feature_enabled() && (l2_accel_bits & ECM_SFE_COMMON_FLOW_L2_ACCEL_ALLOWED)) {
+ if (sfe_is_l2_feature_enabled()) {
nircm->rule_flags |= SFE_RULE_CREATE_FLAG_USE_FLOW_BOTTOM_INTERFACE;
feci->set_stats_bitmap(feci, ECM_DB_OBJ_DIR_FROM, ECM_DB_IFACE_TYPE_VLAN);
@@ -750,7 +703,7 @@
break;
case ECM_DB_IFACE_TYPE_MACVLAN:
#ifdef ECM_INTERFACE_MACVLAN_ENABLE
- if (ecm_sfe_common_is_l2_iface_supported(ECM_DB_IFACE_TYPE_MACVLAN, list_index, from_ifaces_first) && (l2_accel_bits & ECM_SFE_COMMON_FLOW_L2_ACCEL_ALLOWED)) {
+ if (ecm_sfe_common_is_l2_iface_supported(ECM_DB_IFACE_TYPE_MACVLAN, list_index, from_ifaces_first)) {
nircm->rule_flags |= SFE_RULE_CREATE_FLAG_USE_FLOW_BOTTOM_INTERFACE;
feci->set_stats_bitmap(feci, ECM_DB_OBJ_DIR_FROM, ECM_DB_IFACE_TYPE_MACVLAN);
}
@@ -787,7 +740,7 @@
case ECM_DB_IFACE_TYPE_LAG:
#ifdef ECM_INTERFACE_BOND_ENABLE
- if (sfe_is_l2_feature_enabled() && (l2_accel_bits & ECM_SFE_COMMON_FLOW_L2_ACCEL_ALLOWED)) {
+ if (sfe_is_l2_feature_enabled()) {
nircm->rule_flags |= SFE_RULE_CREATE_FLAG_USE_FLOW_BOTTOM_INTERFACE;
/*
* LAG device gets its stats by summing up all stats of its
@@ -869,7 +822,7 @@
break;
}
- if (ecm_sfe_common_is_l2_iface_supported(ECM_DB_IFACE_TYPE_BRIDGE, list_index, to_ifaces_first) && (l2_accel_bits & ECM_SFE_COMMON_RETURN_L2_ACCEL_ALLOWED)) {
+ if (ecm_sfe_common_is_l2_iface_supported(ECM_DB_IFACE_TYPE_BRIDGE, list_index, to_ifaces_first)) {
nircm->rule_flags |= SFE_RULE_CREATE_FLAG_USE_RETURN_BOTTOM_INTERFACE;
feci->set_stats_bitmap(feci, ECM_DB_OBJ_DIR_TO, ECM_DB_IFACE_TYPE_BRIDGE);
}
@@ -896,7 +849,7 @@
break;
}
- if (ecm_sfe_common_is_l2_iface_supported(ECM_DB_IFACE_TYPE_OVS_BRIDGE, list_index, to_ifaces_first) && (l2_accel_bits & ECM_SFE_COMMON_RETURN_L2_ACCEL_ALLOWED)) {
+ if (ecm_sfe_common_is_l2_iface_supported(ECM_DB_IFACE_TYPE_OVS_BRIDGE, list_index, to_ifaces_first)) {
nircm->rule_flags |= SFE_RULE_CREATE_FLAG_USE_RETURN_BOTTOM_INTERFACE;
feci->set_stats_bitmap(feci, ECM_DB_OBJ_DIR_TO, ECM_DB_IFACE_TYPE_OVS_BRIDGE);
}
@@ -949,15 +902,6 @@
break;
}
- /*
- * If external module decide that L2 acceleration is not allowed, we should return
- * without setting PPPoE parameters.
- */
- if (!(l2_accel_bits & ECM_SFE_COMMON_RETURN_L2_ACCEL_ALLOWED)) {
- DEBUG_TRACE("%px: L2 acceleration is not allowed for the PPPoE interface\n", npci);
- break;
- }
-
feci->set_stats_bitmap(feci, ECM_DB_OBJ_DIR_TO, ECM_DB_IFACE_TYPE_PPPOE);
/*
@@ -1010,7 +954,7 @@
}
nircm->valid_flags |= SFE_RULE_CREATE_VLAN_VALID;
- if (sfe_is_l2_feature_enabled() && (l2_accel_bits & ECM_SFE_COMMON_RETURN_L2_ACCEL_ALLOWED)) {
+ if (sfe_is_l2_feature_enabled()) {
nircm->rule_flags |= SFE_RULE_CREATE_FLAG_USE_RETURN_BOTTOM_INTERFACE;
feci->set_stats_bitmap(feci, ECM_DB_OBJ_DIR_TO, ECM_DB_IFACE_TYPE_VLAN);
@@ -1038,7 +982,7 @@
case ECM_DB_IFACE_TYPE_MACVLAN:
#ifdef ECM_INTERFACE_MACVLAN_ENABLE
- if (ecm_sfe_common_is_l2_iface_supported(ECM_DB_IFACE_TYPE_MACVLAN, list_index, to_ifaces_first) && (l2_accel_bits & ECM_SFE_COMMON_RETURN_L2_ACCEL_ALLOWED)) {
+ if (ecm_sfe_common_is_l2_iface_supported(ECM_DB_IFACE_TYPE_MACVLAN, list_index, to_ifaces_first)) {
nircm->rule_flags |= SFE_RULE_CREATE_FLAG_USE_RETURN_BOTTOM_INTERFACE;
feci->set_stats_bitmap(feci, ECM_DB_OBJ_DIR_TO, ECM_DB_IFACE_TYPE_MACVLAN);
}
@@ -1075,7 +1019,7 @@
case ECM_DB_IFACE_TYPE_LAG:
#ifdef ECM_INTERFACE_BOND_ENABLE
- if (sfe_is_l2_feature_enabled() && (l2_accel_bits & ECM_SFE_COMMON_RETURN_L2_ACCEL_ALLOWED)) {
+ if (sfe_is_l2_feature_enabled()) {
nircm->rule_flags |= SFE_RULE_CREATE_FLAG_USE_RETURN_BOTTOM_INTERFACE;
/*
* LAG device gets its stats by summing up all stats of its
@@ -1126,29 +1070,6 @@
}
}
- if (ecm_interface_src_check) {
- DEBUG_INFO("%px: Source interface check flag is enabled\n", npci);
- nircm->rule_flags |= SFE_RULE_CREATE_FLAG_FLOW_SRC_INTERFACE_CHECK;
- nircm->rule_flags |= SFE_RULE_CREATE_FLAG_RETURN_SRC_INTERFACE_CHECK;
- }
-
- /*
- * Enable source interface check without flushing the rule for this flow to re-inject the packet to
- * the network stack in SFE driver after the first pass of the packet coming with the L2 interface.
- * In the second pass, the packet will come to SFE with the L3 interface. If there are more than 3 interfaces
- * in the hierarchy, the packet will be re-injected to the stack until the flows input interface matches with the
- * rule's match_dev.
- */
- if (!(l2_accel_bits & ECM_SFE_COMMON_FLOW_L2_ACCEL_ALLOWED)) {
- nircm->rule_flags |= SFE_RULE_CREATE_FLAG_FLOW_SRC_INTERFACE_CHECK;
- nircm->rule_flags |= SFE_RULE_CREATE_FLAG_FLOW_SRC_INTERFACE_CHECK_NO_FLUSH;
- }
-
- if (!(l2_accel_bits & ECM_SFE_COMMON_RETURN_L2_ACCEL_ALLOWED)) {
- nircm->rule_flags |= SFE_RULE_CREATE_FLAG_RETURN_SRC_INTERFACE_CHECK;
- nircm->rule_flags |= SFE_RULE_CREATE_FLAG_RETURN_SRC_INTERFACE_CHECK_NO_FLUSH;
- }
-
/*
* Set up the flow and return qos tags
*/
@@ -1657,7 +1578,7 @@
* If connection became defunct then set mode so that no further accel/decel attempts occur.
*/
if (feci->is_defunct) {
- feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DEFUNCT_SHORT;
+ feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DEFUNCT;
}
spin_unlock_bh(&feci->lock);
diff --git a/qca-nss-ecm/frontends/sfe/ecm_sfe_ported_ipv6.c b/qca-nss-ecm/frontends/sfe/ecm_sfe_ported_ipv6.c
index a01ee76..fe0d642 100644
--- a/qca-nss-ecm/frontends/sfe/ecm_sfe_ported_ipv6.c
+++ b/qca-nss-ecm/frontends/sfe/ecm_sfe_ported_ipv6.c
@@ -168,7 +168,6 @@
ip_addr_t flow_ip;
ip_addr_t return_ip;
ecm_front_end_acceleration_mode_t result_mode;
- bool is_defunct = false;
/*
* Is this a response to a create message?
@@ -334,28 +333,10 @@
DEBUG_INFO("%px: Decelerate was pending\n", ci);
- /*
- * Check if the pending decelerate was done with the defunct process.
- * If it was, set the is_defunct flag of the feci to false for re-try.
- */
- if (feci->is_defunct) {
- is_defunct = feci->is_defunct;
- feci->is_defunct = false;
- }
-
spin_unlock_bh(&ecm_sfe_ipv6_lock);
spin_unlock_bh(&feci->lock);
- /*
- * If the pending decelerate was done through defunct process, we should
- * re-try it here with the same defunct function, because the purpose of that
- * process is to remove the connection from the database as well after decelerating it.
- */
- if (is_defunct) {
- ecm_db_connection_make_defunct(ci);
- } else {
- feci->decelerate(feci);
- }
+ feci->decelerate(feci);
/*
* Release the connection.
@@ -379,7 +360,7 @@
interface_num = msg->conn_rule.flow_interface_num;
}
if (ecm_sfe_common_fast_xmit_check(interface_num)) {
- msg->rule_flags |= SFE_RULE_CREATE_FLAG_RETURN_TRANSMIT_FAST;
+ msg->rule_flags |= SFE_RULE_CREATE_FLAG_FLOW_TRANSMIT_FAST;
}
interface_num = msg->conn_rule.return_top_interface_num;
@@ -387,7 +368,7 @@
interface_num = msg->conn_rule.return_interface_num;
}
if (ecm_sfe_common_fast_xmit_check(interface_num)) {
- msg->rule_flags |= SFE_RULE_CREATE_FLAG_FLOW_TRANSMIT_FAST;
+ msg->rule_flags |= SFE_RULE_CREATE_FLAG_RETURN_TRANSMIT_FAST;
}
rcu_read_unlock_bh();
@@ -426,8 +407,6 @@
ip_addr_t src_ip;
ip_addr_t dest_ip;
ecm_front_end_acceleration_mode_t result_mode;
- uint32_t l2_accel_bits = (ECM_SFE_COMMON_FLOW_L2_ACCEL_ALLOWED | ECM_SFE_COMMON_RETURN_L2_ACCEL_ALLOWED);
- ecm_sfe_common_l2_accel_check_callback_t l2_accel_check;
DEBUG_CHECK_MAGIC(npci, ECM_SFE_PORTED_IPV6_CONNECTION_INSTANCE_MAGIC, "%px: magic failed", npci);
@@ -514,23 +493,6 @@
nircm->conn_rule.return_interface_num = to_sfe_iface_id;
/*
- * Check which side of the connection can support L2 acceleration.
- * The check is done only for the routed flows and if the L2 feature is enabled.
- */
- if (sfe_is_l2_feature_enabled() && ecm_db_connection_is_routed_get(feci->ci)) {
- rcu_read_lock();
- l2_accel_check = rcu_dereference(ecm_sfe_cb.l2_accel_check);
- if (l2_accel_check) {
- struct ecm_sfe_common_tuple l2_accel_tuple;
-
- ecm_sfe_common_tuple_set(feci, from_sfe_iface_id, to_sfe_iface_id, &l2_accel_tuple);
-
- l2_accel_bits = l2_accel_check(&l2_accel_tuple);
- }
- rcu_read_unlock();
- }
-
- /*
* Set interface numbers involved in accelerating this connection.
* These are the inner facing addresses from the heirarchy interface lists we got above.
*/
@@ -587,7 +549,7 @@
break;
}
- if (ecm_sfe_common_is_l2_iface_supported(ECM_DB_IFACE_TYPE_BRIDGE, list_index, from_ifaces_first) && (l2_accel_bits & ECM_SFE_COMMON_FLOW_L2_ACCEL_ALLOWED)) {
+ if (ecm_sfe_common_is_l2_iface_supported(ECM_DB_IFACE_TYPE_BRIDGE, list_index, from_ifaces_first)) {
nircm->rule_flags |= SFE_RULE_CREATE_FLAG_USE_FLOW_BOTTOM_INTERFACE;
feci->set_stats_bitmap(feci, ECM_DB_OBJ_DIR_FROM, ECM_DB_IFACE_TYPE_BRIDGE);
}
@@ -613,7 +575,7 @@
break;
}
- if (ecm_sfe_common_is_l2_iface_supported(ECM_DB_IFACE_TYPE_OVS_BRIDGE, list_index, from_ifaces_first) && (l2_accel_bits & ECM_SFE_COMMON_FLOW_L2_ACCEL_ALLOWED)) {
+ if (ecm_sfe_common_is_l2_iface_supported(ECM_DB_IFACE_TYPE_OVS_BRIDGE, list_index, from_ifaces_first)) {
nircm->rule_flags |= SFE_RULE_CREATE_FLAG_USE_FLOW_BOTTOM_INTERFACE;
feci->set_stats_bitmap(feci, ECM_DB_OBJ_DIR_FROM, ECM_DB_IFACE_TYPE_OVS_BRIDGE);
}
@@ -666,15 +628,6 @@
break;
}
- /*
- * If external module decide that L2 acceleration is not allowed, we should return
- * without setting PPPoE parameters.
- */
- if (!(l2_accel_bits & ECM_SFE_COMMON_FLOW_L2_ACCEL_ALLOWED)) {
- DEBUG_TRACE("%px: L2 acceleration is not allowed for the PPPoE interface\n", npci);
- break;
- }
-
feci->set_stats_bitmap(feci, ECM_DB_OBJ_DIR_FROM, ECM_DB_IFACE_TYPE_PPPOE);
/*
@@ -728,7 +681,7 @@
}
nircm->valid_flags |= SFE_RULE_CREATE_VLAN_VALID;
- if (sfe_is_l2_feature_enabled() && (l2_accel_bits & ECM_SFE_COMMON_FLOW_L2_ACCEL_ALLOWED)) {
+ if (sfe_is_l2_feature_enabled()) {
nircm->rule_flags |= SFE_RULE_CREATE_FLAG_USE_FLOW_BOTTOM_INTERFACE;
feci->set_stats_bitmap(feci, ECM_DB_OBJ_DIR_FROM, ECM_DB_IFACE_TYPE_VLAN);
@@ -756,7 +709,7 @@
case ECM_DB_IFACE_TYPE_MACVLAN:
#ifdef ECM_INTERFACE_MACVLAN_ENABLE
- if (ecm_sfe_common_is_l2_iface_supported(ECM_DB_IFACE_TYPE_MACVLAN, list_index, from_ifaces_first) && (l2_accel_bits & ECM_SFE_COMMON_FLOW_L2_ACCEL_ALLOWED)) {
+ if (ecm_sfe_common_is_l2_iface_supported(ECM_DB_IFACE_TYPE_MACVLAN, list_index, from_ifaces_first)) {
nircm->rule_flags |= SFE_RULE_CREATE_FLAG_USE_FLOW_BOTTOM_INTERFACE;
feci->set_stats_bitmap(feci, ECM_DB_OBJ_DIR_FROM, ECM_DB_IFACE_TYPE_MACVLAN);
}
@@ -793,7 +746,7 @@
case ECM_DB_IFACE_TYPE_LAG:
#ifdef ECM_INTERFACE_BOND_ENABLE
- if (sfe_is_l2_feature_enabled() && (l2_accel_bits & ECM_SFE_COMMON_FLOW_L2_ACCEL_ALLOWED)) {
+ if (sfe_is_l2_feature_enabled()) {
nircm->rule_flags |= SFE_RULE_CREATE_FLAG_USE_FLOW_BOTTOM_INTERFACE;
/*
* LAG device gets its stats by summing up all stats of its
@@ -875,7 +828,7 @@
break;
}
- if (ecm_sfe_common_is_l2_iface_supported(ECM_DB_IFACE_TYPE_BRIDGE, list_index, to_ifaces_first) && (l2_accel_bits & ECM_SFE_COMMON_RETURN_L2_ACCEL_ALLOWED)) {
+ if (ecm_sfe_common_is_l2_iface_supported(ECM_DB_IFACE_TYPE_BRIDGE, list_index, to_ifaces_first)) {
nircm->rule_flags |= SFE_RULE_CREATE_FLAG_USE_RETURN_BOTTOM_INTERFACE;
feci->set_stats_bitmap(feci, ECM_DB_OBJ_DIR_TO, ECM_DB_IFACE_TYPE_BRIDGE);
}
@@ -902,7 +855,7 @@
break;
}
- if (ecm_sfe_common_is_l2_iface_supported(ECM_DB_IFACE_TYPE_OVS_BRIDGE, list_index, to_ifaces_first) && (l2_accel_bits & ECM_SFE_COMMON_RETURN_L2_ACCEL_ALLOWED)) {
+ if (ecm_sfe_common_is_l2_iface_supported(ECM_DB_IFACE_TYPE_OVS_BRIDGE, list_index, to_ifaces_first)) {
nircm->rule_flags |= SFE_RULE_CREATE_FLAG_USE_RETURN_BOTTOM_INTERFACE;
feci->set_stats_bitmap(feci, ECM_DB_OBJ_DIR_TO, ECM_DB_IFACE_TYPE_OVS_BRIDGE);
}
@@ -955,15 +908,6 @@
break;
}
- /*
- * If external module decide that L2 acceleration is not allowed, we should return
- * without setting PPPoE parameters.
- */
- if (!(l2_accel_bits & ECM_SFE_COMMON_RETURN_L2_ACCEL_ALLOWED)) {
- DEBUG_TRACE("%px: L2 acceleration is not allowed for the PPPoE interface\n", npci);
- break;
- }
-
feci->set_stats_bitmap(feci, ECM_DB_OBJ_DIR_FROM, ECM_DB_IFACE_TYPE_PPPOE);
/*
@@ -1016,7 +960,7 @@
}
nircm->valid_flags |= SFE_RULE_CREATE_VLAN_VALID;
- if (sfe_is_l2_feature_enabled() && (l2_accel_bits & ECM_SFE_COMMON_RETURN_L2_ACCEL_ALLOWED)) {
+ if (sfe_is_l2_feature_enabled()) {
nircm->rule_flags |= SFE_RULE_CREATE_FLAG_USE_RETURN_BOTTOM_INTERFACE;
feci->set_stats_bitmap(feci, ECM_DB_OBJ_DIR_TO, ECM_DB_IFACE_TYPE_VLAN);
@@ -1044,7 +988,7 @@
case ECM_DB_IFACE_TYPE_MACVLAN:
#ifdef ECM_INTERFACE_MACVLAN_ENABLE
- if (ecm_sfe_common_is_l2_iface_supported(ECM_DB_IFACE_TYPE_MACVLAN, list_index, to_ifaces_first) && (l2_accel_bits & ECM_SFE_COMMON_RETURN_L2_ACCEL_ALLOWED)) {
+ if (ecm_sfe_common_is_l2_iface_supported(ECM_DB_IFACE_TYPE_MACVLAN, list_index, to_ifaces_first)) {
nircm->rule_flags |= SFE_RULE_CREATE_FLAG_USE_RETURN_BOTTOM_INTERFACE;
feci->set_stats_bitmap(feci, ECM_DB_OBJ_DIR_TO, ECM_DB_IFACE_TYPE_MACVLAN);
}
@@ -1081,7 +1025,7 @@
case ECM_DB_IFACE_TYPE_LAG:
#ifdef ECM_INTERFACE_BOND_ENABLE
- if (sfe_is_l2_feature_enabled() && (l2_accel_bits & ECM_SFE_COMMON_RETURN_L2_ACCEL_ALLOWED)) {
+ if (sfe_is_l2_feature_enabled()) {
nircm->rule_flags |= SFE_RULE_CREATE_FLAG_USE_RETURN_BOTTOM_INTERFACE;
/*
* LAG device gets its stats by summing up all stats of its
@@ -1132,29 +1076,6 @@
}
}
- if (ecm_interface_src_check) {
- DEBUG_INFO("%px: Source interface check flag is enabled\n", npci);
- nircm->rule_flags |= SFE_RULE_CREATE_FLAG_FLOW_SRC_INTERFACE_CHECK;
- nircm->rule_flags |= SFE_RULE_CREATE_FLAG_RETURN_SRC_INTERFACE_CHECK;
- }
-
- /*
- * Enable source interface check without flushing the rule for this flow to re-inject the packet to
- * the network stack in SFE driver after the first pass of the packet coming with the L2 interface.
- * In the second pass, the packet will come to SFE with the L3 interface. If there are more than 3 interfaces
- * in the hierarchy, the packet will be re-injected to the stack until the flows input interface matches with the
- * rule's match_dev.
- */
- if (!(l2_accel_bits & ECM_SFE_COMMON_FLOW_L2_ACCEL_ALLOWED)) {
- nircm->rule_flags |= SFE_RULE_CREATE_FLAG_FLOW_SRC_INTERFACE_CHECK;
- nircm->rule_flags |= SFE_RULE_CREATE_FLAG_FLOW_SRC_INTERFACE_CHECK_NO_FLUSH;
- }
-
- if (!(l2_accel_bits & ECM_SFE_COMMON_RETURN_L2_ACCEL_ALLOWED)) {
- nircm->rule_flags |= SFE_RULE_CREATE_FLAG_RETURN_SRC_INTERFACE_CHECK;
- nircm->rule_flags |= SFE_RULE_CREATE_FLAG_RETURN_SRC_INTERFACE_CHECK_NO_FLUSH;
- }
-
/*
* Set up the flow and return qos tags
*/
@@ -1601,7 +1522,7 @@
* If connection became defunct then set mode so that no further accel/decel attempts occur.
*/
if (feci->is_defunct) {
- feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DEFUNCT_SHORT;
+ feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DEFUNCT;
}
spin_unlock_bh(&feci->lock);
diff --git a/qca-nss-sfe/exports/sfe_api.h b/qca-nss-sfe/exports/sfe_api.h
index 1bcfedc..d0d28d0 100644
--- a/qca-nss-sfe/exports/sfe_api.h
+++ b/qca-nss-sfe/exports/sfe_api.h
@@ -44,12 +44,9 @@
#define SFE_RULE_CREATE_FLAG_L2_ENCAP (1<<7) /**< consists of an encapsulating protocol that carries an IPv4 payload within it. */
#define SFE_RULE_CREATE_FLAG_USE_FLOW_BOTTOM_INTERFACE (1<<8) /**< Use flow interface number instead of top interface. */
#define SFE_RULE_CREATE_FLAG_USE_RETURN_BOTTOM_INTERFACE (1<<9) /**< Use return interface number instead of top interface. */
-#define SFE_RULE_CREATE_FLAG_FLOW_SRC_INTERFACE_CHECK (1<<10) /**< Check source interface on the flow direction . */
-#define SFE_RULE_CREATE_FLAG_RETURN_SRC_INTERFACE_CHECK (1<<11) /**< Check source interface on the return direction . */
-#define SFE_RULE_CREATE_FLAG_FLOW_TRANSMIT_FAST (1<<12) /**< original flow transmit fast. */
-#define SFE_RULE_CREATE_FLAG_RETURN_TRANSMIT_FAST (1<<13) /**< return flow transmit fast. */
-#define SFE_RULE_CREATE_FLAG_FLOW_SRC_INTERFACE_CHECK_NO_FLUSH (1<<14) /**< Check source interface on the flow direction but do not flush the connection. */
-#define SFE_RULE_CREATE_FLAG_RETURN_SRC_INTERFACE_CHECK_NO_FLUSH (1<<15) /**< Check source interface on the return direction but do not flush the connection. */
+#define SFE_RULE_CREATE_FLAG_SRC_INTERFACE_CHECK (1<<10) /**< Check source interface. */
+#define SFE_RULE_CREATE_FLAG_FLOW_TRANSMIT_FAST (1<<11) /**< original flow transmit fast. */
+#define SFE_RULE_CREATE_FLAG_RETURN_TRANSMIT_FAST (1<<12) /**< return flow transmit fast. */
/**
* Rule creation validity flags.
diff --git a/qca-nss-sfe/sfe.c b/qca-nss-sfe/sfe.c
index 0195d41..f665318 100644
--- a/qca-nss-sfe/sfe.c
+++ b/qca-nss-sfe/sfe.c
@@ -119,15 +119,6 @@
int32_t l2_feature_support; /* L2 feature support */
- /*
- * SFE Bypass Mode.
- * When enabled, SFE's shortcut path will be bypassed.
- * 0: Disabled.
- * 1: Bypass all packets.
- * 2: Bypass only packets with fwmark matches bypass_mark.
- */
- int bypass_mode;
- u32 bypass_mark;
};
static struct sfe_ctx_instance_internal __sfe_ctx;
@@ -1270,23 +1261,18 @@
*/
int sfe_recv(struct sk_buff *skb)
{
- struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
struct net_device *dev;
struct sfe_l2_info l2_info;
int ret;
- dev = skb->dev;
-
/*
- * Apply SFE Bypass Mode policy.
+ * We know that for the vast majority of packets we need the transport
+ * layer header so we may as well start to fetch it now!
*/
- if (unlikely(sfe_ctx->bypass_mode == 1)) {
- return 0;
- }
- if (unlikely(sfe_ctx->bypass_mode == 2 && sfe_ctx->bypass_mark &&
- skb->mark == sfe_ctx->bypass_mark)) {
- return 0;
- }
+ prefetch(skb->data + 32);
+ barrier();
+
+ dev = skb->dev;
/*
* Setting parse flags to 0 since l2_info is passed for non L2.5 header case as well
@@ -1489,69 +1475,6 @@
__ATTR(l2_feature, 0644, sfe_get_l2_feature, sfe_set_l2_feature);
/*
- * SFE Bypass Mode
- */
-static ssize_t
-sfe_get_bypass_mode(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
- return snprintf(buf, (ssize_t)PAGE_SIZE, "%d\n", sfe_ctx->bypass_mode);
-}
-
-static ssize_t
-sfe_set_bypass_mode(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
- int ret;
- int bypass_mode;
-
- ret = kstrtou32(buf, 0, &bypass_mode);
- if (ret) {
- return ret;
- }
- if (bypass_mode > 2 || bypass_mode < 0) {
- return -EINVAL;
- }
- sfe_ctx->bypass_mode = bypass_mode;
- return count;
-}
-
-static const struct device_attribute sfe_bypass_mode_attr =
- __ATTR(bypass_mode, S_IWUSR | S_IRUGO, sfe_get_bypass_mode,
- sfe_set_bypass_mode);
-
-static ssize_t
-sfe_get_bypass_mark(struct device *dev, struct device_attribute *attr,
- char *buf)
-{
- struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
- return snprintf(buf, (ssize_t)PAGE_SIZE, "0x%x\n",
- sfe_ctx->bypass_mark);
-}
-
-static ssize_t
-sfe_set_bypass_mark(struct device *dev, struct device_attribute *attr,
- const char *buf, size_t count)
-{
- struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
- int ret;
- int bypass_mark;
-
- ret = kstrtou32(buf, 0, &bypass_mark);
- if (ret) {
- return ret;
- }
- sfe_ctx->bypass_mark = bypass_mark;
- return count;
-}
-
-static const struct device_attribute sfe_bypass_mark_attr =
- __ATTR(bypass_mark, S_IWUSR | S_IRUGO, sfe_get_bypass_mark,
- sfe_set_bypass_mark);
-
-/*
* sfe_init_if()
*/
int sfe_init_if(void)
@@ -1588,21 +1511,6 @@
goto exit2;
}
- result = sysfs_create_file(sfe_ctx->sys_sfe,
- &sfe_bypass_mode_attr.attr);
- if (result) {
- DEBUG_ERROR("failed to register Bypass Mode sysfs file: %d\n",
- result);
- goto exit2;
- }
- result = sysfs_create_file(sfe_ctx->sys_sfe,
- &sfe_bypass_mark_attr.attr);
- if (result) {
- DEBUG_ERROR("failed to register Bypass Mark sysfs file: %d\n",
- result);
- goto exit2;
- }
-
spin_lock_init(&sfe_ctx->lock);
INIT_LIST_HEAD(&sfe_ctx->msg_queue);
diff --git a/qca-nss-sfe/sfe_ipv4.c b/qca-nss-sfe/sfe_ipv4.c
index 1fd2883..0598b68 100644
--- a/qca-nss-sfe/sfe_ipv4.c
+++ b/qca-nss-sfe/sfe_ipv4.c
@@ -1121,7 +1121,7 @@
/*
* Allocate the various connection tracking objects.
*/
- c = (struct sfe_ipv4_connection *)kzalloc(sizeof(struct sfe_ipv4_connection), GFP_ATOMIC);
+ c = (struct sfe_ipv4_connection *)kmalloc(sizeof(struct sfe_ipv4_connection), GFP_ATOMIC);
if (unlikely(!c)) {
DEBUG_WARN("%px: memory allocation of connection entry failed\n", msg);
this_cpu_inc(si->stats_pcpu->connection_create_failures64);
@@ -1130,7 +1130,7 @@
return -ENOMEM;
}
- original_cm = (struct sfe_ipv4_connection_match *)kzalloc(sizeof(struct sfe_ipv4_connection_match), GFP_ATOMIC);
+ original_cm = (struct sfe_ipv4_connection_match *)kmalloc(sizeof(struct sfe_ipv4_connection_match), GFP_ATOMIC);
if (unlikely(!original_cm)) {
DEBUG_WARN("%px: memory allocation of connection match entry failed\n", msg);
this_cpu_inc(si->stats_pcpu->connection_create_failures64);
@@ -1140,7 +1140,7 @@
return -ENOMEM;
}
- reply_cm = (struct sfe_ipv4_connection_match *)kzalloc(sizeof(struct sfe_ipv4_connection_match), GFP_ATOMIC);
+ reply_cm = (struct sfe_ipv4_connection_match *)kmalloc(sizeof(struct sfe_ipv4_connection_match), GFP_ATOMIC);
if (unlikely(!reply_cm)) {
DEBUG_WARN("%px: memory allocation of connection match entry failed\n", msg);
this_cpu_inc(si->stats_pcpu->connection_create_failures64);
@@ -1221,11 +1221,18 @@
original_cm->xlate_src_port = 0;
}
+ atomic_set(&original_cm->rx_packet_count, 0);
+ original_cm->rx_packet_count64 = 0;
+ atomic_set(&original_cm->rx_byte_count, 0);
+ original_cm->rx_byte_count64 = 0;
+
original_cm->xmit_dev = dest_dev;
original_cm->xmit_dev_mtu = msg->conn_rule.return_mtu;
original_cm->connection = c;
original_cm->counter_match = reply_cm;
+ original_cm->l2_hdr_size = 0;
+ original_cm->flags = 0;
/*
* UDP Socket is valid only in decap direction.
@@ -1303,13 +1310,12 @@
}
}
- if (msg->rule_flags & SFE_RULE_CREATE_FLAG_FLOW_SRC_INTERFACE_CHECK) {
+ reply_cm->l2_hdr_size = 0;
+ if (msg->rule_flags & SFE_RULE_CREATE_FLAG_SRC_INTERFACE_CHECK) {
original_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK;
}
- if (msg->rule_flags & SFE_RULE_CREATE_FLAG_FLOW_SRC_INTERFACE_CHECK_NO_FLUSH) {
- original_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK_NO_FLUSH;
- }
+ reply_cm->flags = 0;
/*
* Adding PPPoE parameters to original and reply entries based on the direction where
@@ -1345,14 +1351,10 @@
ether_addr_copy(reply_cm->pppoe_remote_mac, msg->pppoe_rule.return_pppoe_remote_mac);
}
- if (msg->rule_flags & SFE_RULE_CREATE_FLAG_RETURN_SRC_INTERFACE_CHECK) {
+ if (msg->rule_flags & SFE_RULE_CREATE_FLAG_SRC_INTERFACE_CHECK) {
reply_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK;
}
- if (msg->rule_flags & SFE_RULE_CREATE_FLAG_RETURN_SRC_INTERFACE_CHECK_NO_FLUSH) {
- reply_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK_NO_FLUSH;
- }
-
/*
* For the non-arp interface, we don't write L2 HDR.
*/
@@ -1424,6 +1426,11 @@
reply_cm->xlate_src_port = 0;
}
+ atomic_set(&reply_cm->rx_packet_count, 0);
+ reply_cm->rx_packet_count64 = 0;
+ atomic_set(&reply_cm->rx_byte_count, 0);
+ reply_cm->rx_byte_count64 = 0;
+
reply_cm->xmit_dev = src_dev;
reply_cm->xmit_dev_mtu = msg->conn_rule.flow_mtu;
@@ -2505,71 +2512,6 @@
__ATTR(stats_work_cpu, S_IWUSR | S_IRUGO, sfe_ipv4_get_cpu, sfe_ipv4_set_cpu);
/*
- * DSCP rewrite table
- */
-static ssize_t
-sfe_ipv4_get_dscp_rewrite_mark_to_match(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct sfe_ipv4 *si = &__si;
- return snprintf(buf, (ssize_t)PAGE_SIZE, "0x%x\n",
- si->dscp_rewrite_mark_to_match);
-}
-
-static ssize_t
-sfe_ipv4_set_dscp_rewrite_mark_to_match(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- struct sfe_ipv4 *si = &__si;
- int ret;
- u32 mark_to_match;
-
- ret = kstrtou32(buf, 0, &mark_to_match);
- if (ret)
- return ret;
- si->dscp_rewrite_mark_to_match = mark_to_match;
- return size;
-}
-
-static const struct device_attribute sfe_ipv4_dscp_rewrite_mark_to_match_attr =
- __ATTR(dscp_rewrite_mark_to_match, S_IWUSR | S_IRUGO,
- sfe_ipv4_get_dscp_rewrite_mark_to_match,
- sfe_ipv4_set_dscp_rewrite_mark_to_match);
-
-static ssize_t
-sfe_ipv4_get_dscp_rewrite_dscp_to_set(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct sfe_ipv4 *si = &__si;
- return snprintf(buf, (ssize_t)PAGE_SIZE, "0x%x\n",
- si->dscp_rewrite_dscp_to_set >> SFE_IPV4_DSCP_SHIFT);
-}
-
-static ssize_t
-sfe_ipv4_set_dscp_rewrite_dscp_to_set(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- struct sfe_ipv4 *si = &__si;
- int ret;
- u32 dscp_to_set;
-
- ret = kstrtou32(buf, 0, &dscp_to_set);
- if (ret)
- return ret;
- si->dscp_rewrite_dscp_to_set = dscp_to_set << SFE_IPV4_DSCP_SHIFT;
- return size;
-}
-
-static const struct device_attribute sfe_ipv4_dscp_rewrite_dscp_to_set_attr =
- __ATTR(dscp_rewrite_dscp_to_set, S_IWUSR | S_IRUGO,
- sfe_ipv4_get_dscp_rewrite_dscp_to_set,
- sfe_ipv4_set_dscp_rewrite_dscp_to_set);
-
-/*
* sfe_ipv4_conn_match_hash_init()
* Initialize conn match hash lists
*/
@@ -2661,26 +2603,11 @@
goto exit3;
}
- result = sysfs_create_file(si->sys_ipv4,
- &sfe_ipv4_dscp_rewrite_mark_to_match_attr.attr);
- if (result) {
- DEBUG_ERROR("failed to register DSCP rewrite mark_to_match file: %d\n",
- result);
- goto exit4;
- }
- result = sysfs_create_file(si->sys_ipv4,
- &sfe_ipv4_dscp_rewrite_dscp_to_set_attr.attr);
- if (result) {
- DEBUG_ERROR("failed to register DSCP rewrite dscp_to_set file: %d\n",
- result);
- goto exit5;
- }
-
#ifdef CONFIG_NF_FLOW_COOKIE
result = sysfs_create_file(si->sys_ipv4, &sfe_ipv4_flow_cookie_attr.attr);
if (result) {
DEBUG_ERROR("failed to register flow cookie enable file: %d\n", result);
- goto exit6;
+ goto exit4;
}
#endif /* CONFIG_NF_FLOW_COOKIE */
@@ -2692,7 +2619,7 @@
#endif
if (result < 0) {
DEBUG_ERROR("can't register nf local out hook: %d\n", result);
- goto exit7;
+ goto exit5;
}
DEBUG_INFO("Register nf local out hook success: %d\n", result);
#endif
@@ -2702,7 +2629,7 @@
result = register_chrdev(0, "sfe_ipv4", &sfe_ipv4_debug_dev_fops);
if (result < 0) {
DEBUG_ERROR("Failed to register chrdev: %d\n", result);
- goto exit8;
+ goto exit6;
}
si->debug_dev = result;
@@ -2717,7 +2644,7 @@
spin_lock_init(&si->lock);
return 0;
-exit8:
+exit6:
#ifdef SFE_PROCESS_LOCAL_OUT
DEBUG_TRACE("sfe: Unregister local out hook\n");
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0))
@@ -2725,19 +2652,13 @@
#else
nf_unregister_net_hooks(&init_net, sfe_ipv4_ops_local_out, ARRAY_SIZE(sfe_ipv4_ops_local_out));
#endif
-exit7:
+exit5:
#endif
#ifdef CONFIG_NF_FLOW_COOKIE
sysfs_remove_file(si->sys_ipv4, &sfe_ipv4_flow_cookie_attr.attr);
-exit6:
-#endif /* CONFIG_NF_FLOW_COOKIE */
- sysfs_remove_file(si->sys_ipv4,
- &sfe_ipv4_dscp_rewrite_dscp_to_set_attr.attr);
-exit5:
- sysfs_remove_file(si->sys_ipv4,
- &sfe_ipv4_dscp_rewrite_mark_to_match_attr.attr);
exit4:
+#endif /* CONFIG_NF_FLOW_COOKIE */
sysfs_remove_file(si->sys_ipv4, &sfe_ipv4_cpu_attr.attr);
exit3:
sysfs_remove_file(si->sys_ipv4, &sfe_ipv4_debug_dev_attr.attr);
@@ -2781,10 +2702,6 @@
#ifdef CONFIG_NF_FLOW_COOKIE
sysfs_remove_file(si->sys_ipv4, &sfe_ipv4_flow_cookie_attr.attr);
#endif /* CONFIG_NF_FLOW_COOKIE */
- sysfs_remove_file(si->sys_ipv4,
- &sfe_ipv4_dscp_rewrite_dscp_to_set_attr.attr);
- sysfs_remove_file(si->sys_ipv4,
- &sfe_ipv4_dscp_rewrite_mark_to_match_attr.attr);
sysfs_remove_file(si->sys_ipv4, &sfe_ipv4_debug_dev_attr.attr);
sysfs_remove_file(si->sys_ipv4, &sfe_ipv4_cpu_attr.attr);
diff --git a/qca-nss-sfe/sfe_ipv4.h b/qca-nss-sfe/sfe_ipv4.h
index 4e8169b..48630db 100644
--- a/qca-nss-sfe/sfe_ipv4.h
+++ b/qca-nss-sfe/sfe_ipv4.h
@@ -69,7 +69,7 @@
#define SFE_IPV4_CONNECTION_MATCH_FLAG_INSERT_EGRESS_VLAN_TAG (1<<12)
/* Insert VLAN tag */
#define SFE_IPV4_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK (1<<13)
- /* Source interface check */
+ /* Source interface check. */
#define SFE_IPV4_CONNECTION_MATCH_FLAG_PASSTHROUGH (1<<14)
/* passthrough flow: encap/decap to be skipped for this flow */
#define SFE_IPV4_CONNECTION_MATCH_FLAG_FAST_XMIT (1<<15)
@@ -78,8 +78,6 @@
/* Fast xmit flow checked or not */
#define SFE_IPV4_CONNECTION_MATCH_FLAG_FAST_XMIT_DEV_ADMISSION (1<<17)
/* Fast xmit may be possible for this flow, if SFE check passes */
-#define SFE_IPV4_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK_NO_FLUSH (1<<18)
- /* Source interface check but do not flush the connection */
/*
* IPv4 connection matching structure.
@@ -365,16 +363,6 @@
struct kobject *sys_ipv4; /* sysfs linkage */
int debug_dev; /* Major number of the debug char device */
u32 debug_read_seq; /* sequence number for debug dump */
-
- /*
- * DSCP rewrite table
- * When `mark_to_match` is set non-zero then any packet with the
- * specified skb->mark will override flow DSCP policy with
- * `dscp_to_set` value. i.e. basically equivalent to `iptables -m mark
- * --mark <mark_to_match> -j DSCP --set-dscp <dscp_to_set>`
- */
- u32 dscp_rewrite_mark_to_match;
- u32 dscp_rewrite_dscp_to_set;
};
/*
diff --git a/qca-nss-sfe/sfe_ipv4_gre.c b/qca-nss-sfe/sfe_ipv4_gre.c
index 9626a58..084ea3b 100644
--- a/qca-nss-sfe/sfe_ipv4_gre.c
+++ b/qca-nss-sfe/sfe_ipv4_gre.c
@@ -100,22 +100,19 @@
* Source interface validate.
*/
if (unlikely((cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK) && (cm->match_dev != dev))) {
- if (!(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK_NO_FLUSH)) {
- struct sfe_ipv4_connection *c = cm->connection;
- int ret;
+ struct sfe_ipv4_connection *c = cm->connection;
+ int ret;
- DEBUG_TRACE("flush on source interface check failure\n");
- spin_lock_bh(&si->lock);
- ret = sfe_ipv4_remove_connection(si, c);
- spin_unlock_bh(&si->lock);
+ spin_lock_bh(&si->lock);
+ ret = sfe_ipv4_remove_connection(si, c);
+ spin_unlock_bh(&si->lock);
- if (ret) {
- sfe_ipv4_flush_connection(si, c, SFE_SYNC_REASON_FLUSH);
- }
+ if (ret) {
+ sfe_ipv4_flush_connection(si, c, SFE_SYNC_REASON_FLUSH);
}
rcu_read_unlock();
sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_INVALID_SRC_IFACE);
- DEBUG_TRACE("exception the packet on source interface check failure\n");
+ DEBUG_TRACE("flush on wrong source interface check failure\n");
return 0;
}
diff --git a/qca-nss-sfe/sfe_ipv4_tcp.c b/qca-nss-sfe/sfe_ipv4_tcp.c
index b2d5ec9..8de3269 100644
--- a/qca-nss-sfe/sfe_ipv4_tcp.c
+++ b/qca-nss-sfe/sfe_ipv4_tcp.c
@@ -194,20 +194,17 @@
* Source interface validate.
*/
if (unlikely((cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK) && (cm->match_dev != dev))) {
- if (!(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK_NO_FLUSH)) {
- struct sfe_ipv4_connection *c = cm->connection;
- DEBUG_TRACE("flush on source interface check failure\n");
- spin_lock_bh(&si->lock);
- ret = sfe_ipv4_remove_connection(si, c);
- spin_unlock_bh(&si->lock);
+ struct sfe_ipv4_connection *c = cm->connection;
+ spin_lock_bh(&si->lock);
+ ret = sfe_ipv4_remove_connection(si, c);
+ spin_unlock_bh(&si->lock);
- if (ret) {
- sfe_ipv4_flush_connection(si, c, SFE_SYNC_REASON_FLUSH);
- }
+ if (ret) {
+ sfe_ipv4_flush_connection(si, c, SFE_SYNC_REASON_FLUSH);
}
rcu_read_unlock();
sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_INVALID_SRC_IFACE);
- DEBUG_TRACE("exception the packet on source interface check failure\n");
+ DEBUG_TRACE("flush on wrong source interface check failure\n");
return 0;
}
@@ -567,26 +564,9 @@
}
/*
- * Apply packet Mark.
- * If Mark was set by the Ingress Qdisc that takes precedence over
- * flow policy.
- */
- if (likely(skb->mark == 0)) {
- if (unlikely(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_MARK)) {
- skb->mark = cm->mark;
- }
- }
-
- /*
* Update DSCP
- * DSCP rewrite table takes precedence over flow policy.
*/
- if (unlikely(si->dscp_rewrite_mark_to_match != 0 &&
- si->dscp_rewrite_mark_to_match == skb->mark)) {
- iph->tos = (iph->tos & SFE_IPV4_DSCP_MASK) |
- si->dscp_rewrite_dscp_to_set;
- } else if (unlikely(cm->flags &
- SFE_IPV4_CONNECTION_MATCH_FLAG_DSCP_REMARK)) {
+ if (unlikely(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_DSCP_REMARK)) {
iph->tos = (iph->tos & SFE_IPV4_DSCP_MASK) | cm->dscp;
}
@@ -706,6 +686,13 @@
}
/*
+ * Mark outgoing packet
+ */
+ if (unlikely(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_MARK)) {
+ skb->mark = cm->mark;
+ }
+
+ /*
* For the first packets, check if it could got fast xmit.
*/
if (unlikely(!(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_FAST_XMIT_FLOW_CHECKED)
diff --git a/qca-nss-sfe/sfe_ipv4_udp.c b/qca-nss-sfe/sfe_ipv4_udp.c
index 4b15f7c..1762d74 100644
--- a/qca-nss-sfe/sfe_ipv4_udp.c
+++ b/qca-nss-sfe/sfe_ipv4_udp.c
@@ -190,20 +190,17 @@
* Source interface validate.
*/
if (unlikely((cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK) && (cm->match_dev != dev))) {
- if (!(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK_NO_FLUSH)) {
- struct sfe_ipv4_connection *c = cm->connection;
- DEBUG_TRACE("flush on source interface check failure\n");
- spin_lock_bh(&si->lock);
- ret = sfe_ipv4_remove_connection(si, c);
- spin_unlock_bh(&si->lock);
+ struct sfe_ipv4_connection *c = cm->connection;
+ spin_lock_bh(&si->lock);
+ ret = sfe_ipv4_remove_connection(si, c);
+ spin_unlock_bh(&si->lock);
- if (ret) {
- sfe_ipv4_flush_connection(si, c, SFE_SYNC_REASON_FLUSH);
- }
+ if (ret) {
+ sfe_ipv4_flush_connection(si, c, SFE_SYNC_REASON_FLUSH);
}
rcu_read_unlock();
sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_INVALID_SRC_IFACE);
- DEBUG_TRACE("exception the packet on source interface check failure\n");
+ DEBUG_TRACE("flush on wrong source interface check failure\n");
return 0;
}
@@ -473,26 +470,9 @@
}
/*
- * Apply packet Mark.
- * If Mark was set by the Ingress Qdisc that takes precedence over
- * flow policy.
- */
- if (likely(skb->mark == 0)) {
- if (unlikely(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_MARK)) {
- skb->mark = cm->mark;
- }
- }
-
- /*
* Update DSCP
- * DSCP rewrite table takes precedence over flow policy.
*/
- if (unlikely(si->dscp_rewrite_mark_to_match != 0 &&
- si->dscp_rewrite_mark_to_match == skb->mark)) {
- iph->tos = (iph->tos & SFE_IPV4_DSCP_MASK) |
- si->dscp_rewrite_dscp_to_set;
- } else if (unlikely(cm->flags &
- SFE_IPV4_CONNECTION_MATCH_FLAG_DSCP_REMARK)) {
+ if (unlikely(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_DSCP_REMARK)) {
iph->tos = (iph->tos & SFE_IPV4_DSCP_MASK) | cm->dscp;
}
@@ -549,6 +529,13 @@
}
/*
+ * Mark outgoing packet.
+ */
+ if (unlikely(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_MARK)) {
+ skb->mark = cm->mark;
+ }
+
+ /*
* For the first packets, check if it could got fast xmit.
*/
if (unlikely(!(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_FAST_XMIT_FLOW_CHECKED)
diff --git a/qca-nss-sfe/sfe_ipv6.c b/qca-nss-sfe/sfe_ipv6.c
index a3ac446..cbd67ec 100644
--- a/qca-nss-sfe/sfe_ipv6.c
+++ b/qca-nss-sfe/sfe_ipv6.c
@@ -159,7 +159,6 @@
*/
hlist_for_each_entry_rcu(cm, lhead, hnode) {
if ((cm->match_dest_port != dest_port) ||
- (cm->match_src_port != src_port) ||
(!sfe_ipv6_addr_equal(cm->match_src_ip, src_ip)) ||
(!sfe_ipv6_addr_equal(cm->match_dest_ip, dest_ip)) ||
(cm->match_protocol != protocol) ||
@@ -1130,7 +1129,7 @@
/*
* Allocate the various connection tracking objects.
*/
- c = (struct sfe_ipv6_connection *)kzalloc(sizeof(struct sfe_ipv6_connection), GFP_ATOMIC);
+ c = (struct sfe_ipv6_connection *)kmalloc(sizeof(struct sfe_ipv6_connection), GFP_ATOMIC);
if (unlikely(!c)) {
DEBUG_WARN("%px: memory allocation of connection entry failed\n", msg);
this_cpu_inc(si->stats_pcpu->connection_create_failures64);
@@ -1139,7 +1138,7 @@
return -ENOMEM;
}
- original_cm = (struct sfe_ipv6_connection_match *)kzalloc(sizeof(struct sfe_ipv6_connection_match), GFP_ATOMIC);
+ original_cm = (struct sfe_ipv6_connection_match *)kmalloc(sizeof(struct sfe_ipv6_connection_match), GFP_ATOMIC);
if (unlikely(!original_cm)) {
this_cpu_inc(si->stats_pcpu->connection_create_failures64);
DEBUG_WARN("%px: memory allocation of connection match entry failed\n", msg);
@@ -1149,7 +1148,7 @@
return -ENOMEM;
}
- reply_cm = (struct sfe_ipv6_connection_match *)kzalloc(sizeof(struct sfe_ipv6_connection_match), GFP_ATOMIC);
+ reply_cm = (struct sfe_ipv6_connection_match *)kmalloc(sizeof(struct sfe_ipv6_connection_match), GFP_ATOMIC);
if (unlikely(!reply_cm)) {
this_cpu_inc(si->stats_pcpu->connection_create_failures64);
DEBUG_WARN("%px: memory allocation of connection match entry failed\n", msg);
@@ -1218,12 +1217,18 @@
original_cm->xlate_dest_ip[0] = *(struct sfe_ipv6_addr *)tuple->return_ip;
original_cm->xlate_dest_port = tuple->return_ident;
+ atomic_set(&original_cm->rx_packet_count, 0);
+ original_cm->rx_packet_count64 = 0;
+ atomic_set(&original_cm->rx_byte_count, 0);
+ original_cm->rx_byte_count64 = 0;
original_cm->xmit_dev = dest_dev;
original_cm->xmit_dev_mtu = msg->conn_rule.return_mtu;
original_cm->connection = c;
original_cm->counter_match = reply_cm;
+ original_cm->l2_hdr_size = 0;
+ original_cm->flags = 0;
/*
* Valid in decap direction only
@@ -1302,6 +1307,9 @@
}
}
+ reply_cm->l2_hdr_size = 0;
+ reply_cm->flags = 0;
+
/*
* Adding PPPoE parameters to original and reply entries based on the direction where
* PPPoE header is valid in ECM rule.
@@ -1336,14 +1344,10 @@
ether_addr_copy(reply_cm->pppoe_remote_mac, msg->pppoe_rule.return_pppoe_remote_mac);
}
- if (msg->rule_flags & SFE_RULE_CREATE_FLAG_FLOW_SRC_INTERFACE_CHECK) {
+ if (msg->rule_flags & SFE_RULE_CREATE_FLAG_SRC_INTERFACE_CHECK) {
original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK;
}
- if (msg->rule_flags & SFE_RULE_CREATE_FLAG_FLOW_SRC_INTERFACE_CHECK_NO_FLUSH) {
- original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK_NO_FLUSH;
- }
-
/*
* For the non-arp interface, we don't write L2 HDR.
* Excluding PPPoE from this, since we are now supporting PPPoE encap/decap.
@@ -1402,6 +1406,10 @@
reply_cm->match_src_port = tuple->return_ident;
}
+ atomic_set(&reply_cm->rx_packet_count, 0);
+ reply_cm->rx_packet_count64 = 0;
+ atomic_set(&reply_cm->rx_byte_count, 0);
+ reply_cm->rx_byte_count64 = 0;
reply_cm->xmit_dev = src_dev;
reply_cm->xmit_dev_mtu = msg->conn_rule.flow_mtu;
@@ -1598,14 +1606,10 @@
}
}
- if (msg->rule_flags & SFE_RULE_CREATE_FLAG_RETURN_SRC_INTERFACE_CHECK) {
+ if (msg->rule_flags & SFE_RULE_CREATE_FLAG_SRC_INTERFACE_CHECK) {
reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK;
}
- if (msg->rule_flags & SFE_RULE_CREATE_FLAG_RETURN_SRC_INTERFACE_CHECK_NO_FLUSH) {
- reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK_NO_FLUSH;
- }
-
/*
* For the non-arp interface, we don't write L2 HDR.
* Excluding PPPoE from this, since we are now supporting PPPoE encap/decap.
@@ -2485,72 +2489,7 @@
static const struct device_attribute sfe_ipv6_cpu_attr =
__ATTR(stat_work_cpu, S_IWUSR | S_IRUGO, sfe_ipv6_get_cpu, sfe_ipv6_set_cpu);
-/*
- * DSCP rewrite table
- */
-static ssize_t
-sfe_ipv6_get_dscp_rewrite_mark_to_match(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct sfe_ipv6 *si = &__si6;
- return snprintf(buf, (ssize_t)PAGE_SIZE, "0x%x\n",
- si->dscp_rewrite_mark_to_match);
-}
-
-static ssize_t
-sfe_ipv6_set_dscp_rewrite_mark_to_match(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- struct sfe_ipv6 *si = &__si6;
- int ret;
- u32 mark_to_match;
-
- ret = kstrtou32(buf, 0, &mark_to_match);
- if (ret)
- return ret;
- si->dscp_rewrite_mark_to_match = mark_to_match;
- return size;
-}
-
-static const struct device_attribute sfe_ipv6_dscp_rewrite_mark_to_match_attr =
- __ATTR(dscp_rewrite_mark_to_match, S_IWUSR | S_IRUGO,
- sfe_ipv6_get_dscp_rewrite_mark_to_match,
- sfe_ipv6_set_dscp_rewrite_mark_to_match);
-
-static ssize_t
-sfe_ipv6_get_dscp_rewrite_dscp_to_set(struct device *dev,
- struct device_attribute *attr,
- char *buf)
-{
- struct sfe_ipv6 *si = &__si6;
- return snprintf(buf, (ssize_t)PAGE_SIZE, "0x%x\n",
- si->dscp_rewrite_dscp_to_set >> SFE_IPV6_DSCP_SHIFT);
-}
-
-static ssize_t
-sfe_ipv6_set_dscp_rewrite_dscp_to_set(struct device *dev,
- struct device_attribute *attr,
- const char *buf, size_t size)
-{
- struct sfe_ipv6 *si = &__si6;
- int ret;
- u32 dscp_to_set;
-
- ret = kstrtou32(buf, 0, &dscp_to_set);
- if (ret)
- return ret;
- si->dscp_rewrite_dscp_to_set = dscp_to_set << SFE_IPV6_DSCP_SHIFT;
- return size;
-}
-
-static const struct device_attribute sfe_ipv6_dscp_rewrite_dscp_to_set_attr =
- __ATTR(dscp_rewrite_dscp_to_set, S_IWUSR | S_IRUGO,
- sfe_ipv6_get_dscp_rewrite_dscp_to_set,
- sfe_ipv6_set_dscp_rewrite_dscp_to_set);
-
-/*
+ /*
* sfe_ipv6_hash_init()
* Initialize conn match hash lists
*/
@@ -2644,26 +2583,11 @@
goto exit3;
}
- result = sysfs_create_file(si->sys_ipv6,
- &sfe_ipv6_dscp_rewrite_mark_to_match_attr.attr);
- if (result) {
- DEBUG_ERROR("failed to register DSCP rewrite mark_to_match file: %d\n",
- result);
- goto exit4;
- }
- result = sysfs_create_file(si->sys_ipv6,
- &sfe_ipv6_dscp_rewrite_dscp_to_set_attr.attr);
- if (result) {
- DEBUG_ERROR("failed to register DSCP rewrite dscp_to_set file: %d\n",
- result);
- goto exit5;
- }
-
#ifdef CONFIG_NF_FLOW_COOKIE
result = sysfs_create_file(si->sys_ipv6, &sfe_ipv6_flow_cookie_attr.attr);
if (result) {
DEBUG_ERROR("failed to register flow cookie enable file: %d\n", result);
- goto exit6;
+ goto exit4;
}
#endif /* CONFIG_NF_FLOW_COOKIE */
@@ -2673,13 +2597,13 @@
#else
result = nf_register_net_hooks(&init_net, sfe_ipv6_ops_local_out, ARRAY_SIZE(sfe_ipv6_ops_local_out));
#endif
+#endif
if (result < 0) {
DEBUG_ERROR("can't register nf local out hook: %d\n", result);
- goto exit7;
+ goto exit5;
} else {
DEBUG_ERROR("Register nf local out hook success: %d\n", result);
}
-#endif
/*
* Register our debug char device.
@@ -2687,7 +2611,7 @@
result = register_chrdev(0, "sfe_ipv6", &sfe_ipv6_debug_dev_fops);
if (result < 0) {
DEBUG_ERROR("Failed to register chrdev: %d\n", result);
- goto exit8;
+ goto exit6;
}
si->debug_dev = result;
@@ -2702,7 +2626,7 @@
return 0;
-exit8:
+exit6:
#ifdef SFE_PROCESS_LOCAL_OUT
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0))
DEBUG_TRACE("sfe: Unregister local out hook\n");
@@ -2711,21 +2635,16 @@
DEBUG_TRACE("sfe: Unregister local out hook\n");
nf_unregister_net_hooks(&init_net, sfe_ipv6_ops_local_out, ARRAY_SIZE(sfe_ipv6_ops_local_out));
#endif
-exit7:
#endif
+exit5:
#ifdef CONFIG_NF_FLOW_COOKIE
sysfs_remove_file(si->sys_ipv6, &sfe_ipv6_flow_cookie_attr.attr);
-exit6:
-#endif /* CONFIG_NF_FLOW_COOKIE */
- sysfs_remove_file(si->sys_ipv6,
- &sfe_ipv6_dscp_rewrite_dscp_to_set_attr.attr);
-exit5:
- sysfs_remove_file(si->sys_ipv6,
- &sfe_ipv6_dscp_rewrite_mark_to_match_attr.attr);
exit4:
+#endif /* CONFIG_NF_FLOW_COOKIE */
sysfs_remove_file(si->sys_ipv6, &sfe_ipv6_cpu_attr.attr);
+
exit3:
sysfs_remove_file(si->sys_ipv6, &sfe_ipv6_debug_dev_attr.attr);
@@ -2772,10 +2691,7 @@
#ifdef CONFIG_NF_FLOW_COOKIE
sysfs_remove_file(si->sys_ipv6, &sfe_ipv6_flow_cookie_attr.attr);
#endif /* CONFIG_NF_FLOW_COOKIE */
- sysfs_remove_file(si->sys_ipv6,
- &sfe_ipv6_dscp_rewrite_dscp_to_set_attr.attr);
- sysfs_remove_file(si->sys_ipv6,
- &sfe_ipv6_dscp_rewrite_mark_to_match_attr.attr);
+
sysfs_remove_file(si->sys_ipv6, &sfe_ipv6_cpu_attr.attr);
sysfs_remove_file(si->sys_ipv6, &sfe_ipv6_debug_dev_attr.attr);
diff --git a/qca-nss-sfe/sfe_ipv6.h b/qca-nss-sfe/sfe_ipv6.h
index 9c78f1c..2aa9f41 100644
--- a/qca-nss-sfe/sfe_ipv6.h
+++ b/qca-nss-sfe/sfe_ipv6.h
@@ -82,7 +82,7 @@
#define SFE_IPV6_CONNECTION_MATCH_FLAG_INSERT_EGRESS_VLAN_TAG (1<<12)
/* Insert VLAN tag */
#define SFE_IPV6_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK (1<<13)
- /* Source interface check */
+ /* Source interface check. */
#define SFE_IPV6_CONNECTION_MATCH_FLAG_PASSTHROUGH (1<<14)
/* passthrough flow: encap/decap to be skipped for this flow */
#define SFE_IPV6_CONNECTION_MATCH_FLAG_FAST_XMIT (1<<15)
@@ -91,8 +91,6 @@
/* fast xmit checked or not*/
#define SFE_IPV6_CONNECTION_MATCH_FLAG_FAST_XMIT_DEV_ADMISSION (1<<17)
/* Fast xmit may be possible for this flow, if SFE check passes */
-#define SFE_IPV6_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK_NO_FLUSH (1<<18)
- /* Source interface check but do not flush the connection */
/*
* IPv6 connection matching structure.
@@ -383,16 +381,6 @@
struct kobject *sys_ipv6; /* sysfs linkage */
int debug_dev; /* Major number of the debug char device */
u32 debug_read_seq; /* sequence number for debug dump */
-
- /*
- * DSCP rewrite table
- * When `mark_to_match` is set non-zero then any packet with the
- * specified skb->mark will override flow DSCP policy with
- * `dscp_to_set` value. i.e. basically equivalent to `ip6tables -m mark
- * --mark <mark_to_match> -j DSCP --set-dscp <dscp_to_set>`
- */
- u32 dscp_rewrite_mark_to_match;
- u32 dscp_rewrite_dscp_to_set;
};
/*
diff --git a/qca-nss-sfe/sfe_ipv6_gre.c b/qca-nss-sfe/sfe_ipv6_gre.c
index 8a48b3f..361c23a 100644
--- a/qca-nss-sfe/sfe_ipv6_gre.c
+++ b/qca-nss-sfe/sfe_ipv6_gre.c
@@ -99,21 +99,18 @@
* Source interface validate.
*/
if (unlikely((cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK) && (cm->match_dev != dev))) {
- if (!(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK_NO_FLUSH)) {
- struct sfe_ipv6_connection *c = cm->connection;
- int ret;
- DEBUG_TRACE("flush on source interface check failure\n");
- spin_lock_bh(&si->lock);
- ret = sfe_ipv6_remove_connection(si, c);
- spin_unlock_bh(&si->lock);
+ struct sfe_ipv6_connection *c = cm->connection;
+ int ret;
+ spin_lock_bh(&si->lock);
+ ret = sfe_ipv6_remove_connection(si, c);
+ spin_unlock_bh(&si->lock);
- if (ret) {
- sfe_ipv6_flush_connection(si, c, SFE_SYNC_REASON_FLUSH);
- }
+ if (ret) {
+ sfe_ipv6_flush_connection(si, c, SFE_SYNC_REASON_FLUSH);
}
rcu_read_unlock();
sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_INVALID_SRC_IFACE);
- DEBUG_TRACE("exception the packet on source interface check failure\n");
+ DEBUG_TRACE("flush on wrong source interface check failure\n");
return 0;
}
diff --git a/qca-nss-sfe/sfe_ipv6_tcp.c b/qca-nss-sfe/sfe_ipv6_tcp.c
index 6ba30b3..6ccc8c7 100644
--- a/qca-nss-sfe/sfe_ipv6_tcp.c
+++ b/qca-nss-sfe/sfe_ipv6_tcp.c
@@ -196,20 +196,17 @@
* Source interface validate.
*/
if (unlikely((cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK) && (cm->match_dev != dev))) {
- if (!(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK_NO_FLUSH)) {
- struct sfe_ipv6_connection *c = cm->connection;
- DEBUG_TRACE("flush on source interface check failure\n");
- spin_lock_bh(&si->lock);
- ret = sfe_ipv6_remove_connection(si, c);
- spin_unlock_bh(&si->lock);
+ struct sfe_ipv6_connection *c = cm->connection;
+ spin_lock_bh(&si->lock);
+ ret = sfe_ipv6_remove_connection(si, c);
+ spin_unlock_bh(&si->lock);
- if (ret) {
- sfe_ipv6_flush_connection(si, c, SFE_SYNC_REASON_FLUSH);
- }
+ if (ret) {
+ sfe_ipv6_flush_connection(si, c, SFE_SYNC_REASON_FLUSH);
}
rcu_read_unlock();
sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_INVALID_SRC_IFACE);
- DEBUG_TRACE("exception the packet on source interface check failure\n");
+ DEBUG_TRACE("flush on wrong source interface check failure\n");
return 0;
}
@@ -578,25 +575,9 @@
}
/*
- * Apply packet Mark.
- * If Mark was set by the Ingress Qdisc that takes precedence over
- * flow policy.
- */
- if (likely(skb->mark == 0)) {
- if (unlikely(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_MARK)) {
- skb->mark = cm->mark;
- }
- }
-
- /*
* Update DSCP
- * DSCP rewrite table takes precedence over flow policy.
*/
- if (unlikely(si->dscp_rewrite_mark_to_match != 0 &&
- si->dscp_rewrite_mark_to_match == skb->mark)) {
- sfe_ipv6_change_dsfield(iph, si->dscp_rewrite_dscp_to_set);
- } else if (unlikely(cm->flags &
- SFE_IPV6_CONNECTION_MATCH_FLAG_DSCP_REMARK)) {
+ if (unlikely(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_DSCP_REMARK)) {
sfe_ipv6_change_dsfield(iph, cm->dscp);
}
@@ -710,6 +691,13 @@
}
/*
+ * Mark outgoing packet
+ */
+ if (unlikely(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_MARK)) {
+ skb->mark = cm->mark;
+ }
+
+ /*
* For the first packets, check if it could got fast xmit.
*/
if (unlikely(!(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_FAST_XMIT_FLOW_CHECKED)
diff --git a/qca-nss-sfe/sfe_ipv6_udp.c b/qca-nss-sfe/sfe_ipv6_udp.c
index 445b43f..f34c6ee 100644
--- a/qca-nss-sfe/sfe_ipv6_udp.c
+++ b/qca-nss-sfe/sfe_ipv6_udp.c
@@ -206,20 +206,17 @@
* Source interface validate.
*/
if (unlikely((cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK) && (cm->match_dev != dev))) {
- if (!(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK_NO_FLUSH)) {
- struct sfe_ipv6_connection *c = cm->connection;
- DEBUG_TRACE("flush on source interface check failure\n");
- spin_lock_bh(&si->lock);
- ret = sfe_ipv6_remove_connection(si, c);
- spin_unlock_bh(&si->lock);
+ struct sfe_ipv6_connection *c = cm->connection;
+ spin_lock_bh(&si->lock);
+ ret = sfe_ipv6_remove_connection(si, c);
+ spin_unlock_bh(&si->lock);
- if (ret) {
- sfe_ipv6_flush_connection(si, c, SFE_SYNC_REASON_FLUSH);
- }
+ if (ret) {
+ sfe_ipv6_flush_connection(si, c, SFE_SYNC_REASON_FLUSH);
}
rcu_read_unlock();
sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_INVALID_SRC_IFACE);
- DEBUG_TRACE("exception the packet on source interface check failure\n");
+ DEBUG_TRACE("flush on wrong source interface check failure\n");
return 0;
}
@@ -403,25 +400,9 @@
}
/*
- * Apply packet Mark.
- * If Mark was set by the Ingress Qdisc that takes precedence over
- * flow policy.
- */
- if (likely(skb->mark == 0)) {
- if (unlikely(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_MARK)) {
- skb->mark = cm->mark;
- }
- }
-
- /*
* Update DSCP
- * DSCP rewrite table takes precedence over flow policy.
*/
- if (unlikely(si->dscp_rewrite_mark_to_match != 0 &&
- si->dscp_rewrite_mark_to_match == skb->mark)) {
- sfe_ipv6_change_dsfield(iph, si->dscp_rewrite_dscp_to_set);
- } else if (unlikely(cm->flags &
- SFE_IPV6_CONNECTION_MATCH_FLAG_DSCP_REMARK)) {
+ if (unlikely(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_DSCP_REMARK)) {
sfe_ipv6_change_dsfield(iph, cm->dscp);
}
@@ -542,6 +523,13 @@
}
/*
+ * Mark outgoing packet.
+ */
+ if (unlikely(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_MARK)) {
+ skb->mark = cm->mark;
+ }
+
+ /*
* For the first packets, check if it could got fast xmit.
*/
if (unlikely(!(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_FAST_XMIT_FLOW_CHECKED)