Project import generated by Copybara.

GitOrigin-RevId: 4cecb617b4de4f4356c9331aaa68740a89e65a05
diff --git a/build_scripts/build_all.sh b/build_scripts/build_all.sh
index 95d06f7..111d8c1 100755
--- a/build_scripts/build_all.sh
+++ b/build_scripts/build_all.sh
@@ -127,10 +127,6 @@
   ./build.sh ${product} ${eureka_src_path}
   popd
 
-  pushd ${top_dir}/sdk/nat46
-  ./build.sh ${product} ${eureka_src_path}
-  popd
-
   pushd ${top_dir}/sdk/qca-nss-sfe
   ./build.sh ${product} ${eureka_src_path}
   popd
diff --git a/build_scripts/release_oss.sh b/build_scripts/release_oss.sh
index ffefc33..732d06e 100755
--- a/build_scripts/release_oss.sh
+++ b/build_scripts/release_oss.sh
@@ -18,9 +18,9 @@
   rsync -av ${src}/ ${dst} --exclude .git
 }
 
-# Release source code under ./u-boot
+# Release source code under ./bootloader
 function release_bootloader() {
-  src=$1/u-boot
+  src=$1/bootloader
   dst=$2/u-boot
   echo "Copying bootloader from $src ==> $dst..."
 
diff --git a/build_scripts/setup_env.sh b/build_scripts/setup_env.sh
index b223268..a658366 100644
--- a/build_scripts/setup_env.sh
+++ b/build_scripts/setup_env.sh
@@ -25,7 +25,7 @@
 if [ ! -d "${TOP_DIR}" ]; then
   TOP_DIR="$(readlink -e $(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/../..)"
 fi
-ENABLE_64BIT_BUILD=${ENABLE_64BIT_BUILD:-"true"}
+ENABLE_64BIT_BUILD=${ENABLE_64BIT_BUILD:-"false"}
 
 _toolchain_dir=$(readlink -e ${TOP_DIR}/prebuilt/toolchain)
 _num_jobs=$(grep -c processor /proc/cpuinfo)
diff --git a/qca-nss-dp/Makefile b/qca-nss-dp/Makefile
index 3b00d77..157342b 100644
--- a/qca-nss-dp/Makefile
+++ b/qca-nss-dp/Makefile
@@ -44,7 +44,7 @@
 		   hal/dp_ops/syn_gmac_dp/syn_dp.o \
 		   hal/gmac_ops/syn/gmac/syn_if.o
 NSS_DP_INCLUDE += -I$(obj)/hal/dp_ops/syn_gmac_dp/include
-ccflags-y += -DNSS_DP_IPQ50XX -DNSS_DP_ENABLE_NAPI_GRO
+ccflags-y += -DNSS_DP_IPQ50XX
 endif
 
 ifeq ($(SoC),$(filter $(SoC),ipq95xx))
@@ -60,7 +60,7 @@
 		   hal/gmac_ops/syn/xgmac/syn_if.o
 NSS_DP_INCLUDE += -I$(obj)/hal/dp_ops/edma_dp/edma_v2
 NSS_DP_INCLUDE += -I$(obj)/hal/dp_ops/edma_dp/edma_v2/include
-ccflags-y += -DNSS_DP_IPQ95XX -DNSS_DP_PPE_SUPPORT -DNSS_DP_ENABLE_NAPI_GRO
+ccflags-y += -DNSS_DP_IPQ95XX -DNSS_DP_PPE_SUPPORT
 ifneq ($(CONFIG_NET_SWITCHDEV),)
 qca-nss-dp-objs += nss_dp_switchdev.o
 ccflags-y += -DNSS_DP_PPE_SWITCHDEV
diff --git a/qca-nss-dp/hal/dp_ops/syn_gmac_dp/syn_dp.c b/qca-nss-dp/hal/dp_ops/syn_gmac_dp/syn_dp.c
index ba6aa57..9f7d6eb 100644
--- a/qca-nss-dp/hal/dp_ops/syn_gmac_dp/syn_dp.c
+++ b/qca-nss-dp/hal/dp_ops/syn_gmac_dp/syn_dp.c
@@ -345,7 +345,7 @@
 		 * NETDEV_TX_BUSY. Packet will be requeued or dropped by the caller.
 		 * Queue will be re-enabled from Tx Complete.
 		 */
-		if (likely(!dp_global_ctx.tx_requeue_stop)) {
+		if (likely(!tx_requeue_stop)) {
 			netdev_dbg(netdev, "Stopping tx queue due to lack of tx descriptors");
 			atomic64_inc((atomic64_t *)&tx_info->tx_stats.tx_packets_requeued);
 			netif_stop_queue(netdev);
diff --git a/qca-nss-dp/hal/dp_ops/syn_gmac_dp/syn_dp.h b/qca-nss-dp/hal/dp_ops/syn_gmac_dp/syn_dp.h
index 7c1b81b..54c9ca3 100644
--- a/qca-nss-dp/hal/dp_ops/syn_gmac_dp/syn_dp.h
+++ b/qca-nss-dp/hal/dp_ops/syn_gmac_dp/syn_dp.h
@@ -41,6 +41,9 @@
 #define SYN_DP_PAGE_MODE_SKB_SIZE	256	/* SKB head buffer size for page mode */
 #define SYN_DP_QUEUE_INDEX		0	/* Only one Tx DMA channel 0 enabled */
 
+extern int tx_requeue_stop;
+extern int tx_desc_threshold_size;
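+
+/*
+ * Both knobs are defined in nss_dp_main.c and can be tuned at runtime via
+ * the net/nss_dp_tx sysctl entries registered there.
+ */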
+
 /*
  * syn_dp_info
  *	Synopysys GMAC Dataplane information
diff --git a/qca-nss-dp/hal/dp_ops/syn_gmac_dp/syn_dp_rx.c b/qca-nss-dp/hal/dp_ops/syn_gmac_dp/syn_dp_rx.c
index a695cf1..529c77e 100644
--- a/qca-nss-dp/hal/dp_ops/syn_gmac_dp/syn_dp_rx.c
+++ b/qca-nss-dp/hal/dp_ops/syn_gmac_dp/syn_dp_rx.c
@@ -381,6 +381,7 @@
 	struct dma_desc_rx *rx_desc_next = NULL;
 	uint8_t *next_skb_ptr;
 	skb_frag_t *frag = NULL;
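+	/*
+	 * Cache the runtime GRO flag (NETIF_F_GRO, toggled with
+	 * 'ethtool -K <dev> gro on|off') once per poll rather than relying on
+	 * a compile-time define.
+	 */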
+	bool is_gro_enabled = netdev->features & NETIF_F_GRO;
 
 	busy = atomic_read((atomic_t *)&rx_info->busy_rx_desc_cnt);
 	if (unlikely(!busy)) {
@@ -495,11 +496,12 @@
 				/*
 				 * Deliver the packet to linux
 				 */
-#if defined(NSS_DP_ENABLE_NAPI_GRO)
-				napi_gro_receive(&rx_info->napi_rx, rx_skb);
-#else
-				netif_receive_skb(rx_skb);
-#endif
+				if (is_gro_enabled) {
+					napi_gro_receive(&rx_info->napi_rx, rx_skb);
+				} else {
+					netif_receive_skb(rx_skb);
+				}
+
 				goto next_desc;
 			}
 
@@ -542,11 +544,13 @@
 				prefetch(next_skb_ptr + SYN_DP_RX_SKB_CACHE_LINE1);
 				prefetch(next_skb_ptr + SYN_DP_RX_SKB_CACHE_LINE3);
 			}
-#if defined(NSS_DP_ENABLE_NAPI_GRO)
-			napi_gro_receive(&rx_info->napi_rx, rx_skb);
-#else
-			netif_receive_skb(rx_skb);
-#endif
+
+			if (is_gro_enabled) {
+				napi_gro_receive(&rx_info->napi_rx, rx_skb);
+			} else {
+				netif_receive_skb(rx_skb);
+			}
+
 			goto next_desc;
 		}
 
@@ -601,11 +605,12 @@
 				prefetch(next_skb_ptr + SYN_DP_RX_SKB_CACHE_LINE3);
 			}
 
-#if defined(NSS_DP_ENABLE_NAPI_GRO)
-			napi_gro_receive(&rx_info->napi_rx, rx_info->head);
-#else
-			netif_receive_skb(rx_info->head);
-#endif
+			if (is_gro_enabled) {
+				napi_gro_receive(&rx_info->napi_rx, rx_info->head);
+			} else {
+				netif_receive_skb(rx_info->head);
+			}
+
 			rx_info->head = NULL;
 			goto next_desc;
 		}
diff --git a/qca-nss-dp/hal/dp_ops/syn_gmac_dp/syn_dp_rx.h b/qca-nss-dp/hal/dp_ops/syn_gmac_dp/syn_dp_rx.h
index bb6d005..8f0105e 100644
--- a/qca-nss-dp/hal/dp_ops/syn_gmac_dp/syn_dp_rx.h
+++ b/qca-nss-dp/hal/dp_ops/syn_gmac_dp/syn_dp_rx.h
@@ -19,8 +19,8 @@
 #ifndef __NSS_DP_SYN_DP_RX__
 #define __NSS_DP_SYN_DP_RX__
 
-#define SYN_DP_NAPI_BUDGET_RX		32
-#define SYN_DP_RX_DESC_SIZE		128	/* Rx Descriptors needed in the descriptor pool/queue */
+#define SYN_DP_NAPI_BUDGET_RX		64
+#define SYN_DP_RX_DESC_SIZE		2048	/* Rx Descriptors needed in the descriptor pool/queue */
 #define SYN_DP_RX_DESC_MAX_INDEX	(SYN_DP_RX_DESC_SIZE - 1)
 
 /*
diff --git a/qca-nss-dp/hal/dp_ops/syn_gmac_dp/syn_dp_tx.c b/qca-nss-dp/hal/dp_ops/syn_gmac_dp/syn_dp_tx.c
index 0c367b0..70c395f 100644
--- a/qca-nss-dp/hal/dp_ops/syn_gmac_dp/syn_dp_tx.c
+++ b/qca-nss-dp/hal/dp_ops/syn_gmac_dp/syn_dp_tx.c
@@ -205,6 +205,10 @@
 	atomic_add(desc_needed, (atomic_t *)&tx_info->busy_tx_desc_cnt);
 	syn_resume_dma_tx(tx_info->mac_base);
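+	/*
+	 * Proactively stop the Tx queue when the number of free Tx descriptors
+	 * drops below the sysctl-configured tx_desc_threshold; a threshold of
+	 * 0 effectively disables this check.
+	 */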
 
+	if (unlikely((SYN_DP_TX_DESC_SIZE - atomic_read((atomic_t *)&tx_info->busy_tx_desc_cnt)) < tx_desc_threshold_size)) {
+		netif_stop_queue(tx_info->netdev);
+	}
+
 	return 0;
 }
 
@@ -353,6 +357,10 @@
 	atomic_add(desc_needed, (atomic_t *)&tx_info->busy_tx_desc_cnt);
 	syn_resume_dma_tx(tx_info->mac_base);
 
+	if (unlikely((SYN_DP_TX_DESC_SIZE - atomic_read((atomic_t *)&tx_info->busy_tx_desc_cnt)) < tx_desc_threshold_size)) {
+		netif_stop_queue(tx_info->netdev);
+	}
+
 	return 0;
 }
 
@@ -474,6 +482,11 @@
 			 * Some error happened, collect error statistics.
 			 */
 			syn_dp_tx_error_cnt(tx_info, status);
+
+			/*
+			 * Re-enable DMA Tx to recover from the DMA Tx stall caused by a jabber timeout error
+			 */
+			if (status & DESC_TX_TIMEOUT)
+				syn_enable_dma_tx(tx_info->mac_base);
 		}
 
 		tx_info->tx_comp_idx = syn_dp_tx_inc_index(tx_info->tx_comp_idx, 1);
diff --git a/qca-nss-dp/hal/dp_ops/syn_gmac_dp/syn_dp_tx.h b/qca-nss-dp/hal/dp_ops/syn_gmac_dp/syn_dp_tx.h
index 458fcd4..8ffbaa8 100644
--- a/qca-nss-dp/hal/dp_ops/syn_gmac_dp/syn_dp_tx.h
+++ b/qca-nss-dp/hal/dp_ops/syn_gmac_dp/syn_dp_tx.h
@@ -18,11 +18,11 @@
 #ifndef __NSS_DP_SYN_DP_TX__
 #define __NSS_DP_SYN_DP_TX__
 
-#define SYN_DP_NAPI_BUDGET_TX		32
-#define SYN_DP_TX_DESC_SIZE		1024	/* Tx Descriptors needed in the descriptor pool/queue */
+#define SYN_DP_NAPI_BUDGET_TX		64
+#define SYN_DP_TX_DESC_SIZE		8192	/* Tx Descriptors needed in the descriptor pool/queue */
 #define SYN_DP_TX_DESC_MAX_INDEX	(SYN_DP_TX_DESC_SIZE - 1)
 #define SYN_DP_TX_INVALID_DESC_INDEX	SYN_DP_TX_DESC_SIZE
-
+#define NSS_DP_TX_MAX_DESC_SIZE		SYN_DP_TX_DESC_SIZE
 /*
  * syn_dp_tx_buf
  */
diff --git a/qca-nss-dp/include/nss_dp_dev.h b/qca-nss-dp/include/nss_dp_dev.h
index 2669639..58ac33b 100644
--- a/qca-nss-dp/include/nss_dp_dev.h
+++ b/qca-nss-dp/include/nss_dp_dev.h
@@ -145,7 +145,6 @@
 	uint32_t jumbo_mru;			/* Jumbo mru value for Rx processing */
 	bool overwrite_mode;		/* Overwrite mode for Rx processing */
 	bool page_mode;				/* Page mode for Rx processing */
-	bool tx_requeue_stop;		/* Disable queue stop for Tx processing */
 };
 
 /* Global data */
diff --git a/qca-nss-dp/nss_dp_main.c b/qca-nss-dp/nss_dp_main.c
index f728c93..5ae217f 100644
--- a/qca-nss-dp/nss_dp_main.c
+++ b/qca-nss-dp/nss_dp_main.c
@@ -40,6 +40,11 @@
 #define NSS_DP_NETDEV_TX_QUEUE_NUM NSS_DP_QUEUE_NUM
 #define NSS_DP_NETDEV_RX_QUEUE_NUM NSS_DP_QUEUE_NUM
 
+/*
+ * Maximum number of Tx descriptors supported
+ */
+#define NSS_DP_TX_DESC_SIZE NSS_DP_TX_MAX_DESC_SIZE
+
 /* ipq40xx_mdio_data */
 struct ipq40xx_mdio_data {
 	struct mii_bus *mii_bus;
@@ -64,10 +69,6 @@
 module_param(jumbo_mru, int, 0);
 MODULE_PARM_DESC(jumbo_mru, "jumbo mode");
 
-int tx_requeue_stop;
-module_param(tx_requeue_stop, int, 0);
-MODULE_PARM_DESC(tx_requeue_stop, "disable tx requeue function");
-
 int nss_dp_rx_napi_budget = NSS_DP_HAL_RX_NAPI_BUDGET;
 module_param(nss_dp_rx_napi_budget, int, S_IRUGO);
 MODULE_PARM_DESC(nss_dp_rx_napi_budget, "Rx NAPI budget");
@@ -107,6 +108,126 @@
 #endif
 
 /*
+ * Sysctl table
+ */
+struct ctl_table_header *nss_dp_ctl_table_header = NULL;
+
+int tx_requeue_stop;
+int tx_desc_threshold_size;
+
+/*
+ * nss_dp_tx_requeue_stop()
+ * 	Tx requeue stop sysctl handler
+ */
+static int nss_dp_tx_requeue_stop(struct ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+	int ret;
+	int current_value = tx_requeue_stop;
+
+	ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
+
+	if (!write) {
+		return ret;
+	}
+
+	if (ret) {
+		pr_err("Errno: %d\n", ret);
+		return ret;
+	}
+
+	/*
+	 * Check if tx_requeue_stop is holding a valid value
+	 */
+	if ((tx_requeue_stop != 1) && (tx_requeue_stop != 0)) {
+		pr_err("Invalid input. Valid values are 0/1\n");
+		tx_requeue_stop = current_value;
+		return -EINVAL;
+	}
+
+	return ret;
+}
+
+/*
+ * nss_dp_tx_desc_threshold_set()
+ * 	Set the minimum threshold limit for Tx descriptor size
+ */
+static int nss_dp_tx_desc_threshold_set(struct ctl_table *ctl, int write, void __user *buffer, size_t *lenp, loff_t *ppos)
+{
+	int ret;
+	int current_value = tx_desc_threshold_size;
+
+	ret = proc_dointvec(ctl, write, buffer, lenp, ppos);
+
+	if (!write) {
+		return ret;
+	}
+
+	if (ret) {
+		pr_err("Errno: %d\n", ret);
+		return ret;
+	}
+
+	/*
+	 * Check if tx_desc_threshold_size is holding a valid value
+	 */
+	if ((tx_desc_threshold_size >= NSS_DP_TX_DESC_SIZE) || (tx_desc_threshold_size < 0)) {
+		pr_err("Invalid input. Value should be between 0 and %d, current value: %d\n", NSS_DP_TX_DESC_SIZE - 1, current_value);
+		tx_desc_threshold_size = current_value;
+		return -EINVAL;
+	}
+
+	return ret;
+}
+
+/*
+ * nss_dp_table
+ *	Sysctl entries which are part of NSS DP
+ */
+static struct ctl_table nss_dp_table[] = {
+	{
+		.procname               = "tx_requeue_stop",
+		.data                   = &tx_requeue_stop,
+		.maxlen                 = sizeof(int),
+		.mode                   = 0666,
+		.proc_handler           = &nss_dp_tx_requeue_stop,
+	},
+	{
+		.procname               = "tx_desc_threshold",
+		.data                   = &tx_desc_threshold_size,
+		.maxlen                 = sizeof(int),
+		.mode                   = 0666,
+		.proc_handler           = &nss_dp_tx_desc_threshold_set,
+	},
+	{ }
+};
+
+/*
+ * nss_dp_root_dir
+ *	Root directory for NSS DP sysctl entries
+ */
+static struct ctl_table nss_dp_root_dir[] = {
+	{
+		.procname               = "nss_dp_tx",
+		.mode                   = 0555,
+		.child                  = nss_dp_table,
+	},
+	{ }
+};
+
+/*
+ * nss_dp_root
+ *	Root for networking parameters using sysctl
+ */
+static struct ctl_table nss_dp_root[] = {
+	{
+		.procname               = "net",
+		.mode                   = 0555,
+		.child                  = nss_dp_root_dir,
+	},
+	{}
+};
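+
+/*
+ * With the three levels above registered, these knobs appear under
+ * /proc/sys/net (assuming procfs is mounted at /proc), e.g.:
+ *
+ *	echo 0 > /proc/sys/net/nss_dp_tx/tx_requeue_stop
+ *	echo 256 > /proc/sys/net/nss_dp_tx/tx_desc_threshold
+ */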
+
+/*
  * nss_dp_do_ioctl()
  */
 static int32_t nss_dp_do_ioctl(struct net_device *netdev, struct ifreq *ifr,
@@ -1006,13 +1127,14 @@
 	dp_global_ctx.rx_buf_size = NSS_DP_RX_BUFFER_SIZE;
 
 	/*
+	 * Disable the Tx requeue functionality by default.
+	 */
+	tx_requeue_stop = 1;
+
+	/*
 	 * Get the module params.
 	 * We do not support page_mode or jumbo_mru on low memory profiles.
 	 */
-	dp_global_ctx.tx_requeue_stop = false;
-	if (tx_requeue_stop != 0) {
-		dp_global_ctx.tx_requeue_stop = true;
-	}
 #if !defined(NSS_DP_MEM_PROFILE_LOW) && !defined(NSS_DP_MEM_PROFILE_MEDIUM)
 	dp_global_ctx.overwrite_mode = overwrite_mode;
 	dp_global_ctx.page_mode = page_mode;
@@ -1032,6 +1154,13 @@
 		return -EFAULT;
 	}
 
+	/*
+	 * Register sysctl table
+	 */
+	if (!nss_dp_ctl_table_header) {
+		nss_dp_ctl_table_header = register_sysctl_table(nss_dp_root);
+	}
+
 	ret = platform_driver_register(&nss_dp_drv);
 	if (ret)
 		pr_info("NSS DP platform drv register failed\n");
@@ -1057,6 +1186,13 @@
 		dp_global_ctx.common_init_done = false;
 	}
 
+	/*
+	 * Unregister sysctl table
+	 */
+	if (nss_dp_ctl_table_header) {
+		unregister_sysctl_table(nss_dp_ctl_table_header);
+	}
+
 	platform_driver_unregister(&nss_dp_drv);
 }
 
diff --git a/qca-nss-ecm/Makefile b/qca-nss-ecm/Makefile
index b34e832..debcc10 100644
--- a/qca-nss-ecm/Makefile
+++ b/qca-nss-ecm/Makefile
@@ -19,6 +19,9 @@
 # Makefile for the QCA NSS ECM
 # ###################################################
 
+ifeq ($(ECM_FRONT_END_SFE_ENABLE), y)
+obj-m += examples/ecm_sfe_l2.o
+endif
 obj-m +=examples/ecm_ae_select.o
 
 obj-m += ecm.o
@@ -114,6 +117,7 @@
 # Define ECM_INTERFACE_PPPOE_ENABLE=y in order
 # to enable support for PPPoE acceleration.
 # #############################################################################
+ECM_INTERFACE_PPPOE_ENABLE=y
 ccflags-$(ECM_INTERFACE_PPPOE_ENABLE) += -DECM_INTERFACE_PPPOE_ENABLE
 
 # #############################################################################
diff --git a/qca-nss-ecm/build.sh b/qca-nss-ecm/build.sh
index 0c67b1b..452d82c 100755
--- a/qca-nss-ecm/build.sh
+++ b/qca-nss-ecm/build.sh
@@ -20,9 +20,8 @@
 
 kernel_path=$(readlink -e ${sdk_top_dir}/../kernel)
 qca_sfe_path=$(readlink -e ${sdk_top_dir}/qca-nss-sfe/)
-nat46_path=$(readlink -e ${sdk_top_dir}/nat46/nat46/modules)
 soc_type=ipq50xx
-extra_cflags="-I${qca_sfe_path}/exports -I${nat46_path}"
+extra_cflags="-I${qca_sfe_path}/exports"
 
 build_flags="ECM_CLASSIFIER_HYFI_ENABLE=n ECM_MULTICAST_ENABLE=n ECM_INTERFACE_IPSEC_ENABLE=n ECM_INTERFACE_PPTP_ENABLE=n ECM_INTERFACE_L2TPV2_ENABLE=n ECM_INTERFACE_GRE_TAP_ENABLE=n ECM_INTERFACE_GRE_TUN_ENABLE=n ECM_INTERFACE_SIT_ENABLE=n ECM_INTERFACE_TUNIPIP6_ENABLE=n ECM_INTERFACE_RAWIP_ENABLE=n ECM_INTERFACE_BOND_ENABLE=n ECM_XFRM_ENABLE=n ECM_FRONT_END_SFE_ENABLE=y ECM_NON_PORTED_SUPPORT_ENABLE=n ECM_INTERFACE_MAP_T_ENABLE=n ECM_INTERFACE_VXLAN_ENABLE=n ECM_INTERFACE_OVS_BRIDGE_ENABLE=n ECM_CLASSIFIER_OVS_ENABLE=n ECM_CLASSIFIER_DSCP_IGS=n ECM_IPV6_ENABLE=y ECM_FRONT_END_NSS_ENABLE=n EXAMPLES_BUILD_OVS=n"
 
@@ -33,7 +32,7 @@
     # make kernel module
     echo "Build ${MODULE_NAME}"
     ${CROSS_MAKE} -C ${kernel_path} M=${sdk_top_dir}/${MODULE_NAME} ${build_flags} SoC=${soc_type} EXTRA_CFLAGS="${extra_cflags}" \
-	    KBUILD_EXTRA_SYMBOLS="${qca_sfe_path}/Module.symvers ${nat46_path}/Module.symvers" V=1
+	    KBUILD_EXTRA_SYMBOLS="${qca_sfe_path}/Module.symvers" V=1
 }
 
 ##################################################
@@ -63,6 +62,7 @@
     local module_target_dir="$(GetModulePath ${eureka_src_path} ${product})"
     mkdir -p ${module_target_dir}
     cp -f ecm.ko ${module_target_dir}/${MODULE_NAME}.ko
+    cp -f examples/ecm_sfe_l2.ko ${module_target_dir}
 }
 
 function Usage() {
diff --git a/qca-nss-ecm/ecm_db/ecm_db_connection.c b/qca-nss-ecm/ecm_db/ecm_db_connection.c
index 869e35d..fd68f23 100644
--- a/qca-nss-ecm/ecm_db/ecm_db_connection.c
+++ b/qca-nss-ecm/ecm_db/ecm_db_connection.c
@@ -279,20 +279,19 @@
 
 	DEBUG_INFO("%px: defunct timer expired\n", ci);
 
-	/*
-	 * If defunct fails, return. Do not remove the last ref count. This failure means
-	 * it will be re-tried later with the ecm_db_connection_make_defunct function
-	 * until the total failure count reaches to the max limit which is 250.
-	 * When the limit is reached, defunct process will return true and let
-	 * the connection goes off.
-	 */
 	ret = ci->defunct(ci->feci, &accel_mode);
 
 	/*
-	 * Release the last reference of this connection. This reference is the one
-	 * which was held when the connection was allocated.
+	 * If 'ret' indicates success, this callback managed to defunct the
+	 * connection and can release the last reference.
+	 * If it fails, another defunct process defuncted the connection before
+	 * this callback ran. In that case, check the accel_mode of the connection.
+	 * If the other caller defuncted the connection successfully, it sets the accel_mode to
+	 * ECM_FRONT_END_ACCELERATION_MODE_FAIL_DEFUNCT_SHORT for a short amount of time to avoid
+	 * further accel/decel attempts. In this accel_mode, this callback must not release the
+	 * last reference; it will be released by the ecm_db_connection_make_defunct() function.
 	 */
-	if (ret || ECM_FRONT_END_ACCELERATION_FAILED(accel_mode)) {
+	if (ret || (ECM_FRONT_END_ACCELERATION_FAILED(accel_mode) && (accel_mode != ECM_FRONT_END_ACCELERATION_MODE_FAIL_DEFUNCT_SHORT))) {
 		ecm_db_connection_deref(ci);
 	}
 }
diff --git a/qca-nss-ecm/ecm_db/ecm_db_node.c b/qca-nss-ecm/ecm_db/ecm_db_node.c
index c3d70be..18d6aba 100644
--- a/qca-nss-ecm/ecm_db/ecm_db_node.c
+++ b/qca-nss-ecm/ecm_db/ecm_db_node.c
@@ -1,9 +1,12 @@
 /*
  **************************************************************************
  * Copyright (c) 2014-2018, 2020-2021 The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ *
  * Permission to use, copy, modify, and/or distribute this software for
  * any purpose with or without fee is hereby granted, provided that the
  * above copyright notice and this permission notice appear in all copies.
+ *
  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
@@ -712,6 +715,17 @@
 EXPORT_SYMBOL(ecm_db_node_hash_index_get_first);
 
 /*
+ * ecm_db_node_get_connections_count()
+ *	Returns the connections count on the node in the given direction.
+ */
+int ecm_db_node_get_connections_count(struct ecm_db_node_instance *ni, ecm_db_obj_dir_t dir)
+{
+	DEBUG_CHECK_MAGIC(ni, ECM_DB_NODE_INSTANCE_MAGIC, "%px: magic failed\n", ni);
+
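+	/*
+	 * Note: the count is read without taking the node lock; callers such as
+	 * ecm_interface.c use it as a best-effort hint before walking the list.
+	 */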
+	return ni->connections_count[dir];
+}
+
+/*
  * ecm_db_node_alloc()
  *	Allocate a node instance
  */
diff --git a/qca-nss-ecm/ecm_db/ecm_db_node.h b/qca-nss-ecm/ecm_db/ecm_db_node.h
index a2c0e03..1661ccd 100644
--- a/qca-nss-ecm/ecm_db/ecm_db_node.h
+++ b/qca-nss-ecm/ecm_db/ecm_db_node.h
@@ -1,9 +1,12 @@
 /*
  **************************************************************************
  * Copyright (c) 2014-2020, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ *
  * Permission to use, copy, modify, and/or distribute this software for
  * any purpose with or without fee is hereby granted, provided that the
  * above copyright notice and this permission notice appear in all copies.
+ *
  * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
  * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
  * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
@@ -125,6 +128,8 @@
 int ecm_db_node_hash_index_get_first(void);
 #endif
 
+int ecm_db_node_get_connections_count(struct ecm_db_node_instance *ni, ecm_db_obj_dir_t dir);
+
 void ecm_db_node_ovs_connections_masked_defunct(int ip_ver, uint8_t *src_mac, bool src_mac_check, ip_addr_t src_addr_mask,
 							uint16_t src_port_mask, uint8_t *dest_mac, bool dest_mac_check,
 							ip_addr_t dest_addr_mask, uint16_t dest_port_mask,
diff --git a/qca-nss-ecm/ecm_interface.c b/qca-nss-ecm/ecm_interface.c
index b46233b..52bd11b 100644
--- a/qca-nss-ecm/ecm_interface.c
+++ b/qca-nss-ecm/ecm_interface.c
@@ -6424,15 +6424,12 @@
 			 */
 			if ((is_ported || ecm_db_connection_is_pppoe_bridged_get(ci)) &&
 				is_valid_ether_addr(mac_addr) && ecm_front_end_is_bridge_port(dev) && rx_packets) {
-				DEBUG_TRACE("Update bridge fdb entry for mac: %pM\n", mac_addr);
 
+				DEBUG_TRACE("Update bridge fdb entry for mac: %pM\n", mac_addr);
 				/*
-				 * Update fdb entry only if it exist. Please note that br_refresh_fdb_entry() API
-				 * creates new fdb entry if it does not exist.
+				 * Update the existing fdb entry's timestamp only.
 				 */
-				if (br_fdb_has_entry(dev, mac_addr, 0)) {
-					br_refresh_fdb_entry(dev, mac_addr);
-				}
+				br_fdb_entry_refresh(dev, mac_addr, 0);
 			}
 		}
 
@@ -7020,7 +7017,12 @@
 			 * FROM_NAT and TO_NAT have the same list of connections.
 			 */
 			for (dir = 0; dir <= ECM_DB_OBJ_DIR_TO; dir++) {
-				ecm_db_traverse_node_connection_list_and_defunct(ni, dir, ip_version);
+				/*
+				 * If there are connections on this node, call the defunct function.
+				 */
+				if (ecm_db_node_get_connections_count(ni, dir)) {
+					ecm_db_traverse_node_connection_list_and_defunct(ni, dir, ip_version);
+				}
 			}
 		}
 
diff --git a/qca-nss-ecm/ecm_types.h b/qca-nss-ecm/ecm_types.h
index 6ec2fec..3777688 100644
--- a/qca-nss-ecm/ecm_types.h
+++ b/qca-nss-ecm/ecm_types.h
@@ -192,13 +192,13 @@
 #define ECM_LINUX6_TO_IP_ADDR(d,s) \
 	{ \
 		ecm_type_check_ecm_ip_addr(d); \
-		ecm_type_check_ae_ipv6(&s); \
+		ecm_type_check_ae_ipv6(s); \
 		__ECM_IP_ADDR_COPY_NO_CHECK(d,s); \
 	}
 
 #define ECM_IP_ADDR_TO_LINUX6(d,s) \
 	{ \
-		ecm_type_check_ae_ipv6(&d); \
+		ecm_type_check_ae_ipv6(d); \
 		ecm_type_check_ecm_ip_addr(s); \
 		__ECM_IP_ADDR_COPY_NO_CHECK(d,s); \
 	}
diff --git a/qca-nss-ecm/examples/ecm_sfe_l2.c b/qca-nss-ecm/examples/ecm_sfe_l2.c
new file mode 100644
index 0000000..dd64653
--- /dev/null
+++ b/qca-nss-ecm/examples/ecm_sfe_l2.c
@@ -0,0 +1,1085 @@
+/*
+ **************************************************************************
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ **************************************************************************
+ */
+#include <linux/module.h>
+
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/debugfs.h>
+#include <linux/string.h>
+#include <linux/ctype.h>
+#include <linux/etherdevice.h>
+#include <linux/inet.h>
+
+#include "exports/ecm_sfe_common_public.h"
+
+/*
+ * Global WAN interface name parameter.
+ */
+char wan_name[IFNAMSIZ];
+int wan_name_len;
+
+/*
+ * DebugFS entry object.
+ */
+static struct dentry *ecm_sfe_l2_dentry;
+
+/*
+ * Policy rule directions.
+ */
+enum ecm_sfe_l2_policy_rule_dir {
+	ECM_SFE_L2_POLICY_RULE_EGRESS = 1,
+	ECM_SFE_L2_POLICY_RULE_INGRESS,
+	ECM_SFE_L2_POLICY_RULE_EGRESS_INGRESS,
+};
+
+/*
+ * Policy rule commands.
+ */
+enum ecm_sfe_l2_policy_rule_cmd {
+	ECM_SFE_L2_POLICY_RULE_ADD = 1,
+	ECM_SFE_L2_POLICY_RULE_DEL,
+	ECM_SFE_L2_POLICY_RULE_FLUSH_ALL
+};
+
+/*
+ *  ECM tuple directions.
+ */
+enum ecm_sfe_l2_tuple_dir {
+	ECM_SFE_L2_TUPLE_DIR_ORIGINAL,
+	ECM_SFE_L2_TUPLE_DIR_REPLY,
+};
+
+/*
+ * Defunct by 5-tuple command option types.
+ */
+enum ecm_sfe_l2_defunct_by_5tuple_options {
+	ECM_SFE_L2_DEFUNCT_BY_5TUPLE_OPTION_IP_VERSION,
+	ECM_SFE_L2_DEFUNCT_BY_5TUPLE_OPTION_SIP,
+	ECM_SFE_L2_DEFUNCT_BY_5TUPLE_OPTION_SPORT,
+	ECM_SFE_L2_DEFUNCT_BY_5TUPLE_OPTION_DIP,
+	ECM_SFE_L2_DEFUNCT_BY_5TUPLE_OPTION_DPORT,
+	ECM_SFE_L2_DEFUNCT_BY_5TUPLE_OPTION_PROTOCOL,
+	ECM_SFE_L2_DEFUNCT_BY_5TUPLE_OPTION_MAX,
+};
+
+/*
+ * Policy rule structure
+ */
+struct ecm_sfe_l2_policy_rule {
+	struct list_head list;
+	int protocol;
+	int src_port;
+	int dest_port;
+	uint32_t src_addr[4];
+	uint32_t dest_addr[4];
+	int ip_ver;
+	enum ecm_sfe_l2_policy_rule_dir direction;
+};
+
+LIST_HEAD(ecm_sfe_l2_policy_rules);
+DEFINE_SPINLOCK(ecm_sfe_l2_policy_rules_lock);
+
+/*
+ * ecm_sfe_l2_policy_rule_find()
+ *	Finds a policy rule with the given parameters.
+ */
+static struct ecm_sfe_l2_policy_rule *ecm_sfe_l2_policy_rule_find(int ip_ver, uint32_t *sip_addr, int sport,
+								  uint32_t *dip_addr, int dport,
+								  int protocol)
+{
+	struct ecm_sfe_l2_policy_rule *rule = NULL;
+
+	list_for_each_entry(rule, &ecm_sfe_l2_policy_rules, list) {
+		if (rule->ip_ver != ip_ver)
+			continue;
+
+		if (rule->protocol && (rule->protocol != protocol))
+			continue;
+
+		if (rule->dest_port && (rule->dest_port != dport))
+			continue;
+
+		if (rule->src_port && (rule->src_port != sport))
+			continue;
+
+		if (ip_ver == 4) {
+			if (rule->dest_addr[0] && (rule->dest_addr[0] != dip_addr[0]))
+				continue;
+		} else {
+			if (rule->dest_addr[0] && memcmp(rule->dest_addr, dip_addr, sizeof(uint32_t) * 4))
+				continue;
+		}
+
+		if (ip_ver == 4) {
+			if (rule->src_addr[0] && (rule->src_addr[0] != sip_addr[0]))
+				continue;
+		} else {
+			if (rule->src_addr[0] && memcmp(rule->src_addr, sip_addr, sizeof(uint32_t) * 4))
+				continue;
+		}
+
+		return rule;
+	}
+	return NULL;
+}
+
+/*
+ * ecm_sfe_l2_connection_check_with_policy_rules()
+ *	Checks the ECM tuple with the policy rules in our rules list and
+ *	set the L2 acceleration accordingly, if there is a match.
+ *	sets the L2 acceleration accordingly, if there is a match.
+static uint32_t ecm_sfe_l2_connection_check_with_policy_rules(struct ecm_sfe_common_tuple *tuple, enum ecm_sfe_l2_tuple_dir tuple_dir)
+{
+	struct ecm_sfe_l2_policy_rule *rule = NULL;
+	enum ecm_sfe_l2_policy_rule_dir direction;
+	uint32_t l2_accel_bits = (ECM_SFE_COMMON_FLOW_L2_ACCEL_ALLOWED | ECM_SFE_COMMON_RETURN_L2_ACCEL_ALLOWED);
+
+	if (tuple_dir == ECM_SFE_L2_TUPLE_DIR_ORIGINAL) {
+		spin_lock_bh(&ecm_sfe_l2_policy_rules_lock);
+		rule = ecm_sfe_l2_policy_rule_find(tuple->ip_ver, tuple->src_addr, tuple->src_port,
+						   tuple->dest_addr, tuple->dest_port, tuple->protocol);
+		if (!rule) {
+			spin_unlock_bh(&ecm_sfe_l2_policy_rules_lock);
+			pr_warn("No rule with this tuple\n");
+			goto done;
+		}
+		direction = rule->direction;
+		spin_unlock_bh(&ecm_sfe_l2_policy_rules_lock);
+
+		if (direction == ECM_SFE_L2_POLICY_RULE_EGRESS) {
+			pr_debug("flow side should be L3 interface\n");
+			l2_accel_bits &= ~ECM_SFE_COMMON_FLOW_L2_ACCEL_ALLOWED;
+		} else if (direction == ECM_SFE_L2_POLICY_RULE_INGRESS) {
+			pr_debug("return side should be L3 interface\n");
+			l2_accel_bits &= ~ECM_SFE_COMMON_RETURN_L2_ACCEL_ALLOWED;
+		}
+	} else if (tuple_dir == ECM_SFE_L2_TUPLE_DIR_REPLY) {
+		spin_lock_bh(&ecm_sfe_l2_policy_rules_lock);
+		rule = ecm_sfe_l2_policy_rule_find(tuple->ip_ver, tuple->dest_addr, tuple->dest_port,
+						   tuple->src_addr, tuple->src_port, tuple->protocol);
+
+		if (!rule) {
+			spin_unlock_bh(&ecm_sfe_l2_policy_rules_lock);
+			pr_warn("No rule with this tuple\n");
+			goto done;
+		}
+		direction = rule->direction;
+		spin_unlock_bh(&ecm_sfe_l2_policy_rules_lock);
+
+		if (direction == ECM_SFE_L2_POLICY_RULE_EGRESS) {
+			pr_debug("return side should be L3 interface\n");
+			l2_accel_bits &= ~ECM_SFE_COMMON_RETURN_L2_ACCEL_ALLOWED;
+		} else if (direction == ECM_SFE_L2_POLICY_RULE_INGRESS) {
+			pr_debug("flow side should be L3 interface\n");
+			l2_accel_bits &= ~ECM_SFE_COMMON_FLOW_L2_ACCEL_ALLOWED;
+		}
+	} else {
+		pr_err("unknown tuple_dir: %d\n", tuple_dir);
+		goto done;
+	}
+
+	if (direction == ECM_SFE_L2_POLICY_RULE_EGRESS_INGRESS) {
+		pr_debug("both sides should be L3 interface\n");
+		l2_accel_bits &= ~ECM_SFE_COMMON_FLOW_L2_ACCEL_ALLOWED;
+		l2_accel_bits &= ~ECM_SFE_COMMON_RETURN_L2_ACCEL_ALLOWED;
+	}
+done:
+	return l2_accel_bits;
+}
+
+/*
+ * ecm_sfe_l2_accel_check_callback()
+ *	L2 acceleration check function callback.
+ */
+uint32_t ecm_sfe_l2_accel_check_callback(struct ecm_sfe_common_tuple *tuple)
+{
+	struct net_device *flow_dev;
+	struct net_device *return_dev;
+	struct net_device *wan_dev;
+	uint32_t l2_accel_bits = (ECM_SFE_COMMON_FLOW_L2_ACCEL_ALLOWED | ECM_SFE_COMMON_RETURN_L2_ACCEL_ALLOWED);
+
+	if (strlen(wan_name) == 0) {
+		pr_debug("WAN interface is not set in the debugfs\n");
+		goto done;
+	}
+
+	wan_dev = dev_get_by_name(&init_net, wan_name);
+	if (!wan_dev) {
+		pr_debug("WAN interface: %s couldn't be found\n", wan_name);
+		goto done;
+	}
+
+	flow_dev = dev_get_by_index(&init_net, tuple->src_ifindex);
+	if (!flow_dev) {
+		pr_debug("flow netdevice couldn't be found with index: %d\n", tuple->src_ifindex);
+		dev_put(wan_dev);
+		goto done;
+	}
+
+	return_dev = dev_get_by_index(&init_net, tuple->dest_ifindex);
+	if (!return_dev) {
+		pr_debug("return netdevice couldn't be found with index: %d\n", tuple->dest_ifindex);
+		dev_put(wan_dev);
+		dev_put(flow_dev);
+		goto done;
+	}
+
+	if (wan_dev == return_dev) {
+		/*
+		 * Check the tuple with the policy rules in the ORIGINAL direction of the tuple.
+		 */
+		l2_accel_bits = ecm_sfe_l2_connection_check_with_policy_rules(tuple, ECM_SFE_L2_TUPLE_DIR_ORIGINAL);
+	} else if (wan_dev == flow_dev) {
+		/*
+		 * Check the tuple with the policy rules in the REPLY direction of the tuple.
+		 */
+		l2_accel_bits = ecm_sfe_l2_connection_check_with_policy_rules(tuple, ECM_SFE_L2_TUPLE_DIR_REPLY);
+	}
+	dev_put(wan_dev);
+	dev_put(flow_dev);
+	dev_put(return_dev);
+
+done:
+	return l2_accel_bits;
+}
+
+/*
+ * ecm_sfe_l2_flush_policy_rules()
+ *	Flushes all the policy rules.
+ */
+static void ecm_sfe_l2_flush_policy_rules(void)
+{
+	struct ecm_sfe_l2_policy_rule *rule;
+	struct ecm_sfe_l2_policy_rule *tmp;
+
+	spin_lock_bh(&ecm_sfe_l2_policy_rules_lock);
+	list_for_each_entry_safe(rule, tmp, &ecm_sfe_l2_policy_rules, list) {
+		list_del(&rule->list);
+		spin_unlock_bh(&ecm_sfe_l2_policy_rules_lock);
+		kfree(rule);
+		spin_lock_bh(&ecm_sfe_l2_policy_rules_lock);
+	}
+	spin_unlock_bh(&ecm_sfe_l2_policy_rules_lock);
+}
+
+/*
+ * ecm_sfe_l2_delete_policy_rule()
+ *	Deletes a policy rule with the given parameters.
+ */
+static bool ecm_sfe_l2_delete_policy_rule(int ip_ver, uint32_t *sip_addr, int sport, uint32_t *dip_addr, int dport, int protocol)
+{
+	struct ecm_sfe_l2_policy_rule *rule;
+
+	spin_lock_bh(&ecm_sfe_l2_policy_rules_lock);
+	rule = ecm_sfe_l2_policy_rule_find(ip_ver, sip_addr, sport, dip_addr, dport, protocol);
+	if (!rule) {
+		spin_unlock_bh(&ecm_sfe_l2_policy_rules_lock);
+		pr_warn("rule cannot be found in the list\n");
+		return false;
+	}
+	list_del(&rule->list);
+	spin_unlock_bh(&ecm_sfe_l2_policy_rules_lock);
+	kfree(rule);
+
+	pr_info("rule deleted\n");
+	return true;
+}
+
+/*
+ * ecm_sfe_l2_add_policy_rule()
+ *	Adds a policy rule with the given parameters.
+ */
+static bool ecm_sfe_l2_add_policy_rule(int ip_ver, uint32_t *sip_addr, int sport, uint32_t *dip_addr, int dport, int protocol, enum ecm_sfe_l2_policy_rule_dir direction)
+{
+	struct ecm_sfe_l2_policy_rule *rule;
+
+	spin_lock_bh(&ecm_sfe_l2_policy_rules_lock);
+	rule = ecm_sfe_l2_policy_rule_find(ip_ver, sip_addr, sport, dip_addr, dport, protocol);
+	if (rule) {
+		if (rule->direction != direction) {
+			pr_info("Update direction of the rule from %d to %d\n", rule->direction, direction);
+			rule->direction = direction;
+		}
+		spin_unlock_bh(&ecm_sfe_l2_policy_rules_lock);
+		pr_warn("rule is already present\n");
+		return true;
+	}
+	spin_unlock_bh(&ecm_sfe_l2_policy_rules_lock);
+
+	rule = kzalloc(sizeof(struct ecm_sfe_l2_policy_rule), GFP_ATOMIC);
+	if (!rule) {
+		pr_warn("alloc failed for new rule\n");
+		return false;
+	}
+
+	rule->ip_ver = ip_ver;
+	rule->protocol = protocol;
+	rule->src_port = sport;
+	rule->dest_port = dport;
+	memcpy(rule->src_addr, sip_addr, sizeof(uint32_t) * 4);
+	memcpy(rule->dest_addr, dip_addr, sizeof(uint32_t) * 4);
+	rule->direction = direction;
+
+	INIT_LIST_HEAD(&rule->list);
+
+	spin_lock_bh(&ecm_sfe_l2_policy_rules_lock);
+	list_add(&rule->list, &ecm_sfe_l2_policy_rules);
+	spin_unlock_bh(&ecm_sfe_l2_policy_rules_lock);
+
+	pr_info("rule added\n");
+	return true;
+}
+
+/*
+ * ecm_sfe_l2_policy_rule_write()
+ *	Adds a policy rule to the rule table.
+ *
+ * A policy rule must include cmd, ip_ver and direction. It can also include src/dest IP addresses, ports and the protocol.
+ * cmd and ip_ver MUST be the first 2 options in the command.
+ */
+static ssize_t ecm_sfe_l2_policy_rule_write(struct file *file,
+		const char __user *user_buf, size_t count, loff_t *offset)
+{
+	char *cmd_buf;
+	char *fields;
+	char *token;
+	char *option, *value;
+	int cmd = 0;		/* must be present in the rule */
+	int ip_ver = 0;	/* must be present in the rule */
+	uint32_t sip_addr[4] = {0};
+	uint32_t dip_addr[4] = {0};
+	int sport = 0;
+	int dport = 0;
+	int protocol = 0;
+	int direction = 0;	/* must be present in the rule */
+
+	/*
+	 * Command is formed as:
+	 * echo "cmd=1 ip_ver=4 dport=443 protocol=6 direction=1" > /sys/kernel/debug/ecm_sfe_l2/policy_rules
+	 *
+	 * cmd: 1 is to add, 2 is to delete a rule.
+	 * direction: 1 is egress, 2 is ingress, 3 is both
+	 */
+	cmd_buf = kzalloc(count + 1, GFP_ATOMIC);
+	if (!cmd_buf) {
+		pr_warn("unable to allocate memory for cmd buffer\n");
+		return -ENOMEM;
+	}
+
+	count = simple_write_to_buffer(cmd_buf, count, offset, user_buf, count);
+
+	/*
+	 * Split the buffer into tokens
+	 */
+	fields = cmd_buf;
+	while ((token = strsep(&fields, " "))) {
+		pr_info("\ntoken: %s\n", token);
+
+		option = strsep(&token, "=");
+		value = token;
+
+		pr_info("\t\toption: %s\n", option);
+		pr_info("\t\tvalue: %s\n", value);
+
+		if (!strcmp(option, "cmd")) {
+			if (sscanf(value, "%d", &cmd)) {
+				if (cmd != ECM_SFE_L2_POLICY_RULE_ADD && cmd != ECM_SFE_L2_POLICY_RULE_DEL &&
+					cmd != ECM_SFE_L2_POLICY_RULE_FLUSH_ALL) {
+					pr_err("invalid cmd value: %d\n", cmd);
+					goto fail;
+				}
+				continue;
+			}
+			pr_warn("cannot read value\n");
+			goto fail;
+		}
+
+		if (!strcmp(option, "ip_ver")) {
+			if (sscanf(value, "%d", &ip_ver)) {
+				if (ip_ver != 4 && ip_ver != 6) {
+					pr_err("invalid ip_ver: %d\n", ip_ver);
+					goto fail;
+				}
+				continue;
+			}
+			pr_warn("cannot read value\n");
+			goto fail;
+		}
+
+		if (!strcmp(option, "protocol")) {
+			if (sscanf(value, "%d", &protocol)) {
+				continue;
+			}
+			pr_warn("cannot read value\n");
+			goto fail;
+		}
+
+		if (!strcmp(option, "sport")) {
+			if (sscanf(value, "%d", &sport)) {
+				continue;
+			}
+			pr_warn("cannot read value\n");
+			goto fail;
+		}
+
+		if (!strcmp(option, "dport")) {
+			if (sscanf(value, "%d", &dport)) {
+				continue;
+			}
+			pr_warn("cannot read value\n");
+			goto fail;
+		}
+
+		if (!strcmp(option, "direction")) {
+			if (cmd == ECM_SFE_L2_POLICY_RULE_DEL) {
+				pr_err("direction is not allowed in delete command\n");
+				goto fail;
+			}
+
+			if (sscanf(value, "%d", &direction)) {
+				if (direction != ECM_SFE_L2_POLICY_RULE_EGRESS
+					&& direction != ECM_SFE_L2_POLICY_RULE_INGRESS
+					&& direction != ECM_SFE_L2_POLICY_RULE_EGRESS_INGRESS) {
+
+					pr_err("invalid direction: %d\n", direction);
+					goto fail;
+				}
+				continue;
+			}
+			pr_warn("cannot read value\n");
+			goto fail;
+		}
+
+		if (!strcmp(option, "sip")) {
+			if (ip_ver == 4) {
+				if (!in4_pton(value, -1, (uint8_t *)&sip_addr[0], -1, NULL)) {
+					pr_err("invalid source IP V4 value: %s\n", value);
+					goto fail;
+				}
+			} else if (ip_ver == 6) {
+				if (!in6_pton(value, -1, (uint8_t *)sip_addr, -1, NULL)) {
+					pr_err("invalid source IP V6 value: %s\n", value);
+					goto fail;
+				}
+			} else {
+				pr_err("ip_ver hasn't been set yet\n");
+				goto fail;
+			}
+			continue;
+		}
+
+		if (!strcmp(option, "dip")) {
+			if (ip_ver == 4) {
+				if (!in4_pton(value, -1, (uint8_t *)&dip_addr[0], -1, NULL)) {
+					pr_err("invalid destination IP V4 value: %s\n", value);
+					goto fail;
+				}
+			} else if (ip_ver == 6) {
+				if (!in6_pton(value, -1, (uint8_t *)dip_addr, -1, NULL)) {
+					pr_err("invalid destination IP V6 value: %s\n", value);
+					goto fail;
+				}
+			} else {
+				pr_err("ip_ver hasn't been set yet\n");
+				goto fail;
+			}
+			continue;
+		}
+
+		pr_warn("unrecognized option: %s\n", option);
+		goto fail;
+	}
+
+	kfree(cmd_buf);
+
+	if (cmd == ECM_SFE_L2_POLICY_RULE_ADD) {
+		if (!ecm_sfe_l2_add_policy_rule(ip_ver, sip_addr, sport, dip_addr, dport, protocol, direction)) {
+			pr_err("Add policy rule failed\n");
+			return -ENOMEM;
+		}
+	} else if (cmd == ECM_SFE_L2_POLICY_RULE_DEL) {
+		if (!ecm_sfe_l2_delete_policy_rule(ip_ver, sip_addr, sport, dip_addr, dport, protocol)) {
+			pr_err("Delete policy rule failed\n");
+			return -ENOMEM;
+		}
+	} else if (cmd == ECM_SFE_L2_POLICY_RULE_FLUSH_ALL) {
+		ecm_sfe_l2_flush_policy_rules();
+	}
+
+	return count;
+fail:
+	kfree(cmd_buf);
+	return -EINVAL;
+}
+
+/*
+ * ecm_sfe_l2_policy_rule_seq_show()
+ */
+static int ecm_sfe_l2_policy_rule_seq_show(struct seq_file *m, void *v)
+{
+	struct ecm_sfe_l2_policy_rule *rule;
+
+	rule = list_entry(v, struct ecm_sfe_l2_policy_rule, list);
+
+	if (rule->ip_ver == 4) {
+		seq_printf(m,	"ip_ver: %d"
+				"\tprotocol: %d"
+				"\tsip_addr: %pI4"
+				"\tdip_addr: %pI4"
+				"\tsport: %d"
+				"\tdport: %d"
+				"\tdirection: %d\n",
+				rule->ip_ver,
+				rule->protocol,
+				&rule->src_addr[0],
+				&rule->dest_addr[0],
+				rule->src_port,
+				rule->dest_port,
+				rule->direction);
+	} else {
+		struct in6_addr saddr;
+		struct in6_addr daddr;
+
+		memcpy(&saddr.s6_addr32, rule->src_addr, sizeof(uint32_t) * 4);
+		memcpy(&daddr.s6_addr32, rule->dest_addr, sizeof(uint32_t) * 4);
+
+		seq_printf(m,	"ip_ver: %d"
+				"\tprotocol: %d"
+				"\tsip_addr: %pI6"
+				"\tdip_addr: %pI6"
+				"\tsport: %d"
+				"\tdport: %d"
+				"\tdirection: %d\n",
+				rule->ip_ver,
+				rule->protocol,
+				&saddr,
+				&daddr,
+				rule->src_port,
+				rule->dest_port,
+				rule->direction);
+	}
+
+	return 0;
+}
+
+/*
+ * ecm_sfe_l2_policy_rule_seq_stop()
+ */
+static void ecm_sfe_l2_policy_rule_seq_stop(struct seq_file *p, void *v)
+{
+	spin_unlock_bh(&ecm_sfe_l2_policy_rules_lock);
+}
+
+/*
+ * ecm_sfe_l2_policy_rule_seq_next()
+ */
+static void *ecm_sfe_l2_policy_rule_seq_next(struct seq_file *p, void *v,
+					loff_t *pos)
+{
+	return seq_list_next(v, &ecm_sfe_l2_policy_rules, pos);
+}
+
+/*
+ * ecm_sfe_l2_policy_rule_seq_start()
+ */
+static void *ecm_sfe_l2_policy_rule_seq_start(struct seq_file *m, loff_t *_pos)
+{
+	spin_lock_bh(&ecm_sfe_l2_policy_rules_lock);
+	return seq_list_start(&ecm_sfe_l2_policy_rules, *_pos);
+}
+
+static const struct seq_operations ecm_sfe_l2_policy_rule_seq_ops = {
+	.start = ecm_sfe_l2_policy_rule_seq_start,
+	.next  = ecm_sfe_l2_policy_rule_seq_next,
+	.stop  = ecm_sfe_l2_policy_rule_seq_stop,
+	.show  = ecm_sfe_l2_policy_rule_seq_show,
+};
+
+/*
+ * ecm_sfe_l2_policy_rule_open()
+ */
+static int ecm_sfe_l2_policy_rule_open(struct inode *inode, struct file *file)
+{
+	return seq_open(file, &ecm_sfe_l2_policy_rule_seq_ops);
+}
+
+/*
+ * File operations for policy rules add/delete/list operations.
+ */
+static const struct file_operations ecm_sfe_l2_policy_rule_fops = {
+	.owner		= THIS_MODULE,
+	.open		= ecm_sfe_l2_policy_rule_open,
+	.write		= ecm_sfe_l2_policy_rule_write,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= seq_release,
+};
+
+/*
+ * ecm_sfe_l2_defunct_by_5tuple_write()
+ *	Writes the defunct by 5-tuple command to the debugfs node.
+ */
+static ssize_t ecm_sfe_l2_defunct_by_5tuple_write(struct file *f, const char *user_buf,
+					  size_t count, loff_t *offset)
+{
+	int ret = -EINVAL;
+	char *cmd_buf;
+	int field_count;
+	char *fields_ptr;
+	char *fields[ECM_SFE_L2_DEFUNCT_BY_5TUPLE_OPTION_MAX];
+	char *option, *value;
+	int ip_ver;
+	uint32_t sip_addr_v4;
+	uint32_t dip_addr_v4;
+	struct in6_addr sip_addr_v6;
+	struct in6_addr dip_addr_v6;
+	int sport, dport;
+	int protocol;
+	bool defunct_result;
+
+	/*
+	 * The command is formed as below for IPv4 and IPv6 5-tuples, respectively:
+	 *
+	 * echo "ip_ver=4 sip=192.168.1.100 sport=443 dip=192.168.2.100 dport=1000 protocol=6" > /sys/kernel/debug/ecm_sfe_l2/defunct_by_5tuple
+	 * echo "ip_ver=6 sip=2aaa::100 sport=443 dip=3bbb::200 dport=1000 protocol=6" > /sys/kernel/debug/ecm_sfe_l2/defunct_by_5tuple
+	 *
+	 * The order of the options MUST be as above and it MUST contain all the 5-tuple fields and the ip_ver.
+	 */
+	cmd_buf = kzalloc(count + 1, GFP_ATOMIC);
+	if (!cmd_buf) {
+		pr_warn("unable to allocate memory for cmd buffer\n");
+		return -ENOMEM;
+	}
+
+	count = simple_write_to_buffer(cmd_buf, count, offset, user_buf, count);
+
+	/*
+	 * Split the buffer into its fields
+	 */
+	field_count = 0;
+	fields_ptr = cmd_buf;
+	fields[field_count] = strsep(&fields_ptr, " ");
+	while (fields[field_count] != NULL) {
+		pr_info("Field %d: %s\n", field_count, fields[field_count]);
+		field_count++;
+		if (field_count == ECM_SFE_L2_DEFUNCT_BY_5TUPLE_OPTION_MAX)
+			break;
+
+		fields[field_count] = strsep(&fields_ptr, " ");
+	}
+
+	if (field_count != ECM_SFE_L2_DEFUNCT_BY_5TUPLE_OPTION_MAX) {
+		kfree(cmd_buf);
+		pr_err("Invalid field count %d\n", field_count);
+		return -EINVAL;
+	}
+
+	/*
+	 * IP version (ip_ver) field validation.
+	 */
+	option = strsep(&fields[ECM_SFE_L2_DEFUNCT_BY_5TUPLE_OPTION_IP_VERSION], "=");
+	if (!option || strcmp(option, "ip_ver")) {
+		pr_err("invalid IP version option name: %s\n", option);
+		goto fail;
+	}
+	value = fields[ECM_SFE_L2_DEFUNCT_BY_5TUPLE_OPTION_IP_VERSION];
+	if (!sscanf(value, "%d", &ip_ver)) {
+		pr_err("Unable to read IP version value %s\n", value);
+		goto fail;
+	}
+	if (ip_ver != 4 && ip_ver != 6) {
+		pr_err("invalid IP version: %d\n", ip_ver);
+		goto fail;
+	}
+
+	/*
+	 * Source IP (sip) field validation.
+	 */
+	option = strsep(&fields[ECM_SFE_L2_DEFUNCT_BY_5TUPLE_OPTION_SIP], "=");
+	if (!option || strcmp(option, "sip")) {
+		pr_err("invalid source IP option name: %s\n", option);
+		goto fail;
+	}
+	value = fields[ECM_SFE_L2_DEFUNCT_BY_5TUPLE_OPTION_SIP];
+
+	if (ip_ver == 4) {
+		if (!in4_pton(value, -1, (uint8_t *)&sip_addr_v4, -1, NULL)) {
+			pr_err("invalid source IP V4 value: %s\n", value);
+			goto fail;
+		}
+	} else {
+		if (!in6_pton(value, -1, (uint8_t *)sip_addr_v6.s6_addr, -1, NULL)) {
+			pr_err("invalid source IP V6 value: %s\n", value);
+			goto fail;
+		}
+	}
+
+	/*
+	 * Source port (sport) field validation.
+	 */
+	option = strsep(&fields[ECM_SFE_L2_DEFUNCT_BY_5TUPLE_OPTION_SPORT], "=");
+	if (!option || strcmp(option, "sport")) {
+		pr_err("invalid source port option name: %s\n", option);
+		goto fail;
+	}
+	value = fields[ECM_SFE_L2_DEFUNCT_BY_5TUPLE_OPTION_SPORT];
+	if (!sscanf(value, "%d", &sport)) {
+		pr_err("Unable to read source port value %s\n", value);
+		goto fail;
+	}
+
+	/*
+	 * Destination IP (dip) field validation.
+	 */
+	option = strsep(&fields[ECM_SFE_L2_DEFUNCT_BY_5TUPLE_OPTION_DIP], "=");
+	if (!option || strcmp(option, "dip")) {
+		pr_err("invalid destination IP option name: %s\n", option);
+		goto fail;
+	}
+	value = fields[ECM_SFE_L2_DEFUNCT_BY_5TUPLE_OPTION_DIP];
+
+	if (ip_ver == 4) {
+		if (!in4_pton(value, -1, (uint8_t *)&dip_addr_v4, -1, NULL)) {
+			pr_err("invalid destination IP V4 value: %s\n", value);
+			goto fail;
+		}
+	} else {
+		if (!in6_pton(value, -1, (uint8_t *)dip_addr_v6.s6_addr, -1, NULL)) {
+			pr_err("invalid destination IP V6 value: %s\n", value);
+			goto fail;
+		}
+	}
+
+	/*
+	 * Destination port (dport) field validation.
+	 */
+	option = strsep(&fields[ECM_SFE_L2_DEFUNCT_BY_5TUPLE_OPTION_DPORT], "=");
+	if (!option || strcmp(option, "dport")) {
+		pr_err("invalid destination port option name: %s\n", option);
+		goto fail;
+	}
+	value = fields[ECM_SFE_L2_DEFUNCT_BY_5TUPLE_OPTION_DPORT];
+	if (!sscanf(value, "%d", &dport)) {
+		pr_err("Unable to read destination port value %s\n", value);
+		goto fail;
+	}
+
+	/*
+	 * Protocol field validation.
+	 */
+	option = strsep(&fields[ECM_SFE_L2_DEFUNCT_BY_5TUPLE_OPTION_PROTOCOL], "=");
+	if (!option || strcmp(option, "protocol")) {
+		pr_err("invalid protocol option name: %s\n", option);
+		goto fail;
+	}
+	value = fields[ECM_SFE_L2_DEFUNCT_BY_5TUPLE_OPTION_PROTOCOL];
+	if (!sscanf(value, "%d", &protocol)) {
+		pr_err("Unable to read protocol value %s\n", value);
+		goto fail;
+	}
+
+	/*
+	 * Call 5-tuple defunct functions.
+	 */
+	if (ip_ver == 4) {
+		pr_debug("sip: %pI4 sport: %d dip: %pI4 dport: %d protocol: %d\n", &sip_addr_v4, sport, &dip_addr_v4, dport, protocol);
+		defunct_result = ecm_sfe_common_defunct_ipv4_connection(sip_addr_v4, htons(sport), dip_addr_v4, htons(dport), protocol);
+	} else {
+		pr_debug("sip: %pI6 sport: %d dip: %pI6 dport: %d protocol: %d\n", &sip_addr_v6, sport, &dip_addr_v6, dport, protocol);
+		defunct_result = ecm_sfe_common_defunct_ipv6_connection(&sip_addr_v6, htons(sport), &dip_addr_v6, htons(dport), protocol);
+	}
+
+	if (!defunct_result) {
+		pr_warn("No connection found with this 5-tuple\n");
+	}
+
+	ret = count;
+fail:
+	kfree(cmd_buf);
+
+	return ret;
+}
+
+/*
+ * File operations for defunct by 5-tuple operations.
+ */
+static struct file_operations ecm_sfe_l2_defunct_by_5tuple_fops = {
+	.owner = THIS_MODULE,
+	.write = ecm_sfe_l2_defunct_by_5tuple_write,
+};
+
+/*
+ * ecm_sfe_l2_defunct_by_port_write()
+ *	Writes the defunct by port command to the debugfs node.
+ */
+static ssize_t ecm_sfe_l2_defunct_by_port_write(struct file *f, const char *user_buf,
+					  size_t count, loff_t *offset)
+{
+	char *cmd_buf;
+	char *fields;
+	char *option, *value;
+	int port;
+	int direction;
+
+	/*
+	 * Command is formed as:
+	 *
+	 * echo "sport=443" > /sys/kernel/debug/ecm_sfe_l2/defunct_by_port
+	 * echo "dport=443" > /sys/kernel/debug/ecm_sfe_l2/defunct_by_port
+	 */
+	cmd_buf = kzalloc(count + 1, GFP_ATOMIC);
+	if (!cmd_buf) {
+		pr_warn("unable to allocate memory for cmd buffer\n");
+		return -ENOMEM;
+	}
+
+	count = simple_write_to_buffer(cmd_buf, count, offset, user_buf, count);
+
+	/*
+	 * Split the buffer into its fields
+	 */
+	fields = cmd_buf;
+	option = strsep(&fields, "=");
+	if (!strcmp(option, "sport")) {
+		direction = 0;
+	} else if (!strcmp(option, "dport")) {
+		direction = 1;
+	} else {
+		pr_err("invalid option name: %s\n", option);
+		kfree(cmd_buf);
+		return -EINVAL;
+	}
+
+	value = fields;
+	if (!sscanf(value, "%d", &port)) {
+		pr_err("Unable to read port value %s\n", value);
+		kfree(cmd_buf);
+		return -EINVAL;
+	}
+	pr_debug("option: %s value: %d\n", option, port);
+
+	kfree(cmd_buf);
+
+	/*
+	 * Call port based defunct function.
+	 */
+	ecm_sfe_common_defunct_by_port(port, direction, wan_name);
+
+	return count;
+}
+
+/*
+ * File operations for defunct by port operations.
+ */
+static struct file_operations ecm_sfe_l2_defunct_by_port_fops = {
+	.owner = THIS_MODULE,
+	.write = ecm_sfe_l2_defunct_by_port_write,
+};
+
+/*
+ * ecm_sfe_l2_defunct_by_protocol_write()
+ *	Writes the defunct by protocol command to the debugfs node.
+ */
+static ssize_t ecm_sfe_l2_defunct_by_protocol_write(struct file *f, const char *user_buf,
+					  size_t count, loff_t *offset)
+{
+	char *cmd_buf;
+	char *fields;
+	char *option, *value;
+	int protocol;
+
+	/*
+	 * Command is formed as:
+	 *
+	 * echo "protocol=6" > /sys/kernel/debug/ecm_sfe_l2/defunct_by_protocol
+	 */
+	cmd_buf = kzalloc(count + 1, GFP_ATOMIC);
+	if (!cmd_buf) {
+		pr_warn("unable to allocate memory for cmd buffer\n");
+		return -ENOMEM;
+	}
+
+	count = simple_write_to_buffer(cmd_buf, count, offset, user_buf, count);
+
+	/*
+	 * Split the buffer into its fields
+	 */
+	fields = cmd_buf;
+	option = strsep(&fields, "=");
+	if (strcmp(option, "protocol")) {
+		pr_err("invalid option name: %s\n", option);
+		kfree(cmd_buf);
+		return -EINVAL;
+	}
+
+	value = fields;
+	if (!sscanf(value, "%d", &protocol)) {
+		pr_err("Unable to read protocol value %s\n", value);
+		kfree(cmd_buf);
+		return -EINVAL;
+	}
+	pr_debug("option: %s value: %d\n", option, protocol);
+
+	kfree(cmd_buf);
+
+	/*
+	 * Defunct the connections which has this protocol number.
+	 */
+	ecm_sfe_common_defunct_by_protocol(protocol);
+
+	return count;
+}
+
+/*
+ * File operations for defunct by protocol operations.
+ */
+static struct file_operations ecm_sfe_l2_defunct_by_protocol_fops = {
+	.owner = THIS_MODULE,
+	.write = ecm_sfe_l2_defunct_by_protocol_write,
+};
+
+/*
+ * ecm_sfe_l2_wan_name_read()
+ *	Reads the WAN interface name from the debugfs node wan_name
+ */
+static ssize_t ecm_sfe_l2_wan_name_read(struct file *f, char *buffer,
+					 size_t len, loff_t *offset)
+{
+	return simple_read_from_buffer(buffer, len, offset, wan_name, wan_name_len);
+}
+
+/*
+ * ecm_sfe_l2_wan_name_write()
+ *	Writes the WAN interface name to the debugfs node wan_name
+ */
+static ssize_t ecm_sfe_l2_wan_name_write(struct file *f, const char *buffer,
+					  size_t len, loff_t *offset)
+{
+	ssize_t ret;
+
+	if (len > IFNAMSIZ) {
+		pr_err("WAN interface name is too long\n");
+		return -EINVAL;
+	}
+
+	ret = simple_write_to_buffer(wan_name, IFNAMSIZ - 1, offset, buffer, len);
+	if (ret < 0) {
+		pr_err("WAN interface name cannot be written\n");
+		return ret;
+	}
+
+	/*
+	 * NUL-terminate and strip the trailing newline that 'echo' appends.
+	 */
+	wan_name[ret] = '\0';
+	if (ret > 0 && wan_name[ret - 1] == '\n')
+		wan_name[ret - 1] = '\0';
+	wan_name_len = strlen(wan_name);
+
+	return ret;
+}
+
+/*
+ * File operations for wan interface name.
+ */
+static struct file_operations ecm_sfe_l2_wan_name_fops = {
+	.owner = THIS_MODULE,
+	.write = ecm_sfe_l2_wan_name_write,
+	.read = ecm_sfe_l2_wan_name_read,
+};
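+
+/*
+ * Example (assuming debugfs is mounted at /sys/kernel/debug; 'eth0' is a
+ * placeholder interface name):
+ *
+ *	echo eth0 > /sys/kernel/debug/ecm_sfe_l2/wan_name
+ */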
+
+struct ecm_sfe_common_callbacks sfe_cbs = {
+	.l2_accel_check = ecm_sfe_l2_accel_check_callback,	/**< Callback to decide if L2 acceleration is wanted for the flow. */
+};
+
+/*
+ * ecm_sfe_l2_init()
+ */
+static int __init ecm_sfe_l2_init(void)
+{
+	pr_debug("ECM SFE L2 module INIT\n");
+
+	/*
+	 * Create entries in DebugFS for control functions
+	 */
+	ecm_sfe_l2_dentry = debugfs_create_dir("ecm_sfe_l2", NULL);
+	if (!ecm_sfe_l2_dentry) {
+		pr_info("Failed to create SFE L2 directory entry\n");
+		return -1;
+	}
+
+	if (!debugfs_create_file("wan_name", S_IWUSR, ecm_sfe_l2_dentry,
+					NULL, &ecm_sfe_l2_wan_name_fops)) {
+		pr_debug("Failed to create ecm wan interface file in debugfs\n");
+		debugfs_remove_recursive(ecm_sfe_l2_dentry);
+		return -1;
+	}
+
+	if (!debugfs_create_file("policy_rules", S_IWUSR, ecm_sfe_l2_dentry,
+					NULL, &ecm_sfe_l2_policy_rule_fops)) {
+		pr_debug("Failed to create ecm SFE L2 policy rules file in debugfs\n");
+		debugfs_remove_recursive(ecm_sfe_l2_dentry);
+		return -1;
+	}
+
+	if (!debugfs_create_file("defunct_by_protocol", S_IWUSR, ecm_sfe_l2_dentry,
+					NULL, &ecm_sfe_l2_defunct_by_protocol_fops)) {
+		pr_debug("Failed to create ecm defunct by protocol file in debugfs\n");
+		debugfs_remove_recursive(ecm_sfe_l2_dentry);
+		return -1;
+	}
+
+	if (!debugfs_create_file("defunct_by_5tuple", S_IWUSR, ecm_sfe_l2_dentry,
+					NULL, &ecm_sfe_l2_defunct_by_5tuple_fops)) {
+		pr_debug("Failed to create ecm defunct by 5tuple file in debugfs\n");
+		debugfs_remove_recursive(ecm_sfe_l2_dentry);
+		return -1;
+	}
+
+	if (!debugfs_create_file("defunct_by_port", S_IWUSR, ecm_sfe_l2_dentry,
+					NULL, &ecm_sfe_l2_defunct_by_port_fops)) {
+		pr_debug("Failed to create ecm defunct by port file in debugfs\n");
+		debugfs_remove_recursive(ecm_sfe_l2_dentry);
+		return -1;
+	}
+
+	if (ecm_sfe_common_callbacks_register(&sfe_cbs)) {
+		pr_debug("Failed to register callbacks\n");
+		debugfs_remove_recursive(ecm_sfe_l2_dentry);
+		return -1;
+	}
+	return 0;
+}
+
+/*
+ * ecm_sfe_l2_exit()
+ */
+static void __exit ecm_sfe_l2_exit(void)
+{
+	pr_debug("ECM SFE L2 check module EXIT\n");
+
+	ecm_sfe_common_callbacks_unregister();
+
+	/*
+	 * Remove the debugfs files recursively.
+	 */
+	debugfs_remove_recursive(ecm_sfe_l2_dentry);
+}
+
+module_init(ecm_sfe_l2_init)
+module_exit(ecm_sfe_l2_exit)
+
+MODULE_DESCRIPTION("ECM SFE L2 Module");
+#ifdef MODULE_LICENSE
+MODULE_LICENSE("Dual BSD/GPL");
+#endif
diff --git a/qca-nss-ecm/exports/ecm_sfe_common_public.h b/qca-nss-ecm/exports/ecm_sfe_common_public.h
new file mode 100644
index 0000000..41f6b6e
--- /dev/null
+++ b/qca-nss-ecm/exports/ecm_sfe_common_public.h
@@ -0,0 +1,136 @@
+/*
+ **************************************************************************
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ **************************************************************************
+ */
+
+/**
+ * @file ecm_sfe_common_public.h
+ *	ECM SFE frontend public APIs and data structures.
+ */
+
+#ifndef __ECM_SFE_COMMON_PUBLIC_H__
+#define __ECM_SFE_COMMON_PUBLIC_H__
+
+/**
+ * @addtogroup ecm_sfe_common_subsystem
+ * @{
+ */
+
+#define ECM_SFE_COMMON_FLOW_L2_ACCEL_ALLOWED (1 << 0)	/**< L2 acceleration is allowed on the flow interface. */
+#define ECM_SFE_COMMON_RETURN_L2_ACCEL_ALLOWED (1 << 1)	/**< L2 acceleration is allowed on the return interface. */
+
+/**
+ * SFE common 5-tuple for external use.
+ */
+struct ecm_sfe_common_tuple {
+	uint32_t src_addr[4];	/**< Source IP in host order. */
+	uint32_t dest_addr[4];	/**< Destination IP in host order. */
+
+	uint16_t src_port;	/**< Source port in host order. */
+	uint16_t dest_port;	/**< Destination port in host order. */
+	uint32_t src_ifindex;	/**< Source L2 interface index. */
+	uint32_t dest_ifindex;	/**< Destination L2 interface index. */
+	uint8_t protocol;	/**< Next protocol header number. */
+	uint8_t ip_ver;		/**< IP version 4 or 6. */
+};
+
+/**
+ * Callback registered by SFE clients; returns a bitmap indicating whether L2 acceleration is allowed in each direction.
+ */
+typedef uint32_t (*ecm_sfe_common_l2_accel_check_callback_t)(struct ecm_sfe_common_tuple *tuple);
+
+/**
+ * Data structure for SFE common callbacks.
+ */
+struct ecm_sfe_common_callbacks {
+	ecm_sfe_common_l2_accel_check_callback_t l2_accel_check;	/**< Callback to decide if L2 acceleration is wanted for the flow. */
+};
+
+/**
+ * Defuncts an IPv4 5-tuple connection.
+ *
+ * @param	src_ip		The source IP address.
+ * @param	src_port	The source port.
+ * @param	dest_ip		The destination IP address.
+ * @param	dest_port	The destination port.
+ * @param	protocol	The protocol.
+ *
+ * @return
+ * True if defuncted; false if not.
+ */
+bool ecm_sfe_common_defunct_ipv4_connection(__be32 src_ip, int src_port,
+						__be32 dest_ip, int dest_port, int protocol);
+
+/**
+ * Defuncts an IPv6 5-tuple connection.
+ *
+ * @param	src_ip		The source IP address.
+ * @param	src_port	The source port.
+ * @param	dest_ip		The destination IP address.
+ * @param	dest_port	The destination port.
+ * @param	protocol	The protocol.
+ *
+ * @return
+ * True if defuncted; false if not.
+ */
+bool ecm_sfe_common_defunct_ipv6_connection(struct in6_addr *src_ip, int src_port,
+						struct in6_addr *dest_ip, int dest_port, int protocol);
+
+/**
+ * Defuncts all the connections with this protocol type.
+ *
+ * @param protocol Protocol type.
+ *
+ * @return
+ * None.
+ */
+void ecm_sfe_common_defunct_by_protocol(int protocol);
+
+/**
+ * Defuncts all the connections with this port number in the correct direction.
+ *
+ * @param	port		The port number.
+ * @param	direction	The direction of the port (source (0) or destination (1)).
+ * @param	wan_name	The WAN port interface name.
+ *
+ * @return
+ * None.
+ */
+void ecm_sfe_common_defunct_by_port(int port, int direction, char *wan_name);
+
+/**
+ * Registers a client for SFE common callbacks.
+ *
+ * @param sfe_cb SFE common callback pointer.
+ *
+ * @return
+ * 0 on success, or an error value on failure.
+ */
+int ecm_sfe_common_callbacks_register(struct ecm_sfe_common_callbacks *sfe_cb);
+
+/**
+ * Unregisters a client from SFE common callbacks.
+ *
+ * @return
+ * None.
+ */
+void ecm_sfe_common_callbacks_unregister(void);
+
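+/**
+ * Example client (illustrative only): the callback name and the return policy
+ * below are assumptions, not part of this API. The callback is invoked under
+ * RCU read-side protection, so it must not sleep.
+ * @code
+ * static uint32_t my_l2_accel_check(struct ecm_sfe_common_tuple *tuple)
+ * {
+ *	if (tuple->protocol == 6) {	// TCP: allow L2 acceleration on the flow side only
+ *		return ECM_SFE_COMMON_FLOW_L2_ACCEL_ALLOWED;
+ *	}
+ *	return ECM_SFE_COMMON_FLOW_L2_ACCEL_ALLOWED | ECM_SFE_COMMON_RETURN_L2_ACCEL_ALLOWED;
+ * }
+ *
+ * static struct ecm_sfe_common_callbacks my_cbs = {
+ *	.l2_accel_check = my_l2_accel_check,
+ * };
+ *
+ * // module init: ecm_sfe_common_callbacks_register(&my_cbs);
+ * // module exit: ecm_sfe_common_callbacks_unregister();
+ * @endcode
+ */
+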
+/**
+ * @}
+ */
+
+#endif /* __ECM_SFE_COMMON_PUBLIC_H__ */
diff --git a/qca-nss-ecm/frontends/cmn/ecm_non_ported_ipv4.c b/qca-nss-ecm/frontends/cmn/ecm_non_ported_ipv4.c
index 20f7d0d..c28f770 100644
--- a/qca-nss-ecm/frontends/cmn/ecm_non_ported_ipv4.c
+++ b/qca-nss-ecm/frontends/cmn/ecm_non_ported_ipv4.c
@@ -295,7 +295,7 @@
 		case ECM_AE_CLASSIFIER_RESULT_NSS:
 			if (!ecm_nss_feature_check(skb, ip_hdr)) {
 				DEBUG_WARN("Unsupported feature found for NSS acceleration\n");
-				goto fail_1;
+				return NF_ACCEPT;
 			}
 			defunct_callback = ecm_nss_non_ported_ipv4_connection_defunct_callback;
 			feci = (struct ecm_front_end_connection_instance *)ecm_nss_non_ported_ipv4_connection_instance_alloc(can_accel, protocol, &nci);
diff --git a/qca-nss-ecm/frontends/cmn/ecm_non_ported_ipv6.c b/qca-nss-ecm/frontends/cmn/ecm_non_ported_ipv6.c
index 7658828..ba50952 100644
--- a/qca-nss-ecm/frontends/cmn/ecm_non_ported_ipv6.c
+++ b/qca-nss-ecm/frontends/cmn/ecm_non_ported_ipv6.c
@@ -294,7 +294,7 @@
 		case ECM_AE_CLASSIFIER_RESULT_NSS:
 			if (!ecm_nss_feature_check(skb, ip_hdr)) {
 				DEBUG_WARN("Unsupported feature found for NSS acceleration\n");
-				goto fail_1;
+				return NF_ACCEPT;
 			}
 			defunct_callback = ecm_nss_non_ported_ipv6_connection_defunct_callback;
 			feci = (struct ecm_front_end_connection_instance *)ecm_nss_non_ported_ipv6_connection_instance_alloc(can_accel, protocol, &nci);
diff --git a/qca-nss-ecm/frontends/ecm_front_end_common.c b/qca-nss-ecm/frontends/ecm_front_end_common.c
index 40e6abe..f6b5300 100644
--- a/qca-nss-ecm/frontends/ecm_front_end_common.c
+++ b/qca-nss-ecm/frontends/ecm_front_end_common.c
@@ -84,7 +84,7 @@
 
 	ECM_FE_FEATURE_SFE | ECM_FE_FEATURE_NON_PORTED | ECM_FE_FEATURE_CONN_LIMIT |	/* SFE type */
 	ECM_FE_FEATURE_OVS_BRIDGE | ECM_FE_FEATURE_OVS_VLAN | ECM_FE_FEATURE_BRIDGE |
-	ECM_FE_FEATURE_BONDING,
+	ECM_FE_FEATURE_BONDING | ECM_FE_FEATURE_SRC_IF_CHECK,
 
 	ECM_FE_FEATURE_NSS | ECM_FE_FEATURE_SFE | ECM_FE_FEATURE_NON_PORTED | ECM_FE_FEATURE_BRIDGE |
 	ECM_FE_FEATURE_MULTICAST | ECM_FE_FEATURE_BONDING | ECM_FE_FEATURE_IGS |
@@ -612,6 +612,9 @@
 	 * Unregister sysctl table.
 	 */
 	if (ecm_front_end_ctl_tbl_hdr) {
+#ifdef ECM_FRONT_END_SFE_ENABLE
+		ecm_sfe_sysctl_tbl_exit();
+#endif
 		unregister_sysctl_table(ecm_front_end_ctl_tbl_hdr);
 	}
 }
diff --git a/qca-nss-ecm/frontends/include/ecm_front_end_common.h b/qca-nss-ecm/frontends/include/ecm_front_end_common.h
index 46c50d8..0c70a1d 100644
--- a/qca-nss-ecm/frontends/include/ecm_front_end_common.h
+++ b/qca-nss-ecm/frontends/include/ecm_front_end_common.h
@@ -312,6 +312,6 @@
 void ecm_front_end_common_sysctl_register(void);
 void ecm_front_end_common_sysctl_unregister(void);
 int ecm_sfe_sysctl_tbl_init(void);
+void ecm_sfe_sysctl_tbl_exit(void);
 
 #endif  /* __ECM_FRONT_END_COMMON_H */
-
diff --git a/qca-nss-ecm/frontends/include/ecm_front_end_types.h b/qca-nss-ecm/frontends/include/ecm_front_end_types.h
index f7e2de1..2dcf3c1 100644
--- a/qca-nss-ecm/frontends/include/ecm_front_end_types.h
+++ b/qca-nss-ecm/frontends/include/ecm_front_end_types.h
@@ -108,10 +108,11 @@
  * An acceleration mode less than zero indicates a connection that cannot be accelerated, maybe due to error.
  */
 enum ecm_front_end_acceleration_modes {
+	ECM_FRONT_END_ACCELERATION_MODE_FAIL_DEFUNCT_SHORT = -8,	/* Acceleration has failed for a short time because the connection became defunct and is awaiting removal */
 	ECM_FRONT_END_ACCELERATION_MODE_FAIL_DEFUNCT = -7,	/* Acceleration has permanently failed due to the connection has become defunct */
 	ECM_FRONT_END_ACCELERATION_MODE_FAIL_DECEL = -6,	/* Acceleration has permanently failed due to deceleration malfunction */
 	ECM_FRONT_END_ACCELERATION_MODE_FAIL_NO_ACTION = -5,	/* Acceleration has permanently failed due to too many offloads that were rejected without any packets being offloaded */
-	ECM_FRONT_END_ACCELERATION_MODE_FAIL_ACCEL_ENGINE = -4,		/* Acceleration has permanently failed due to too many accel engine NAK's */
+	ECM_FRONT_END_ACCELERATION_MODE_FAIL_ACCEL_ENGINE = -4,	/* Acceleration has permanently failed due to too many accel engine NAK's */
 	ECM_FRONT_END_ACCELERATION_MODE_FAIL_DRIVER = -3,	/* Acceleration has permanently failed due to too many driver interaction failures */
 	ECM_FRONT_END_ACCELERATION_MODE_FAIL_RULE = -2,		/* Acceleration has permanently failed due to bad rule data */
 	ECM_FRONT_END_ACCELERATION_MODE_FAIL_DENIED = -1,	/* Acceleration has permanently failed due to can_accel denying accel */
diff --git a/qca-nss-ecm/frontends/nss/ecm_nss_multicast_ipv4.c b/qca-nss-ecm/frontends/nss/ecm_nss_multicast_ipv4.c
index 405c5f5..7591331 100644
--- a/qca-nss-ecm/frontends/nss/ecm_nss_multicast_ipv4.c
+++ b/qca-nss-ecm/frontends/nss/ecm_nss_multicast_ipv4.c
@@ -1584,7 +1584,7 @@
 	 * If connection became defunct then set mode so that no further accel/decel attempts occur.
 	 */
 	if (feci->is_defunct) {
-		feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DEFUNCT;
+		feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DEFUNCT_SHORT;
 	}
 
 	spin_unlock_bh(&feci->lock);
diff --git a/qca-nss-ecm/frontends/nss/ecm_nss_multicast_ipv6.c b/qca-nss-ecm/frontends/nss/ecm_nss_multicast_ipv6.c
index 29a490c..91d3a9d 100644
--- a/qca-nss-ecm/frontends/nss/ecm_nss_multicast_ipv6.c
+++ b/qca-nss-ecm/frontends/nss/ecm_nss_multicast_ipv6.c
@@ -1539,7 +1539,7 @@
 	 * If connection became defunct then set mode so that no further accel/decel attempts occur.
 	 */
 	if (feci->is_defunct) {
-		feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DEFUNCT;
+		feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DEFUNCT_SHORT;
 	}
 
 	spin_unlock_bh(&feci->lock);
diff --git a/qca-nss-ecm/frontends/nss/ecm_nss_non_ported_ipv4.c b/qca-nss-ecm/frontends/nss/ecm_nss_non_ported_ipv4.c
index 927355c..ec4f365 100644
--- a/qca-nss-ecm/frontends/nss/ecm_nss_non_ported_ipv4.c
+++ b/qca-nss-ecm/frontends/nss/ecm_nss_non_ported_ipv4.c
@@ -1460,7 +1460,7 @@
 	 * If connection became defunct then set mode so that no further accel/decel attempts occur.
 	 */
 	if (feci->is_defunct) {
-		feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DEFUNCT;
+		feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DEFUNCT_SHORT;
 	}
 	spin_unlock_bh(&feci->lock);
 
diff --git a/qca-nss-ecm/frontends/nss/ecm_nss_non_ported_ipv6.c b/qca-nss-ecm/frontends/nss/ecm_nss_non_ported_ipv6.c
index 086d108..13e0cc5 100644
--- a/qca-nss-ecm/frontends/nss/ecm_nss_non_ported_ipv6.c
+++ b/qca-nss-ecm/frontends/nss/ecm_nss_non_ported_ipv6.c
@@ -1321,7 +1321,7 @@
 	 * If connection became defunct then set mode so that no further accel/decel attempts occur.
 	 */
 	if (feci->is_defunct) {
-		feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DEFUNCT;
+		feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DEFUNCT_SHORT;
 	}
 	spin_unlock_bh(&feci->lock);
 
diff --git a/qca-nss-ecm/frontends/nss/ecm_nss_ported_ipv4.c b/qca-nss-ecm/frontends/nss/ecm_nss_ported_ipv4.c
index 8d87036..6375b6b 100644
--- a/qca-nss-ecm/frontends/nss/ecm_nss_ported_ipv4.c
+++ b/qca-nss-ecm/frontends/nss/ecm_nss_ported_ipv4.c
@@ -1557,7 +1557,7 @@
 	 * If connection became defunct then set mode so that no further accel/decel attempts occur.
 	 */
 	if (feci->is_defunct) {
-		feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DEFUNCT;
+		feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DEFUNCT_SHORT;
 	}
 
 	spin_unlock_bh(&feci->lock);
diff --git a/qca-nss-ecm/frontends/nss/ecm_nss_ported_ipv6.c b/qca-nss-ecm/frontends/nss/ecm_nss_ported_ipv6.c
index 886149f..1be8a58 100644
--- a/qca-nss-ecm/frontends/nss/ecm_nss_ported_ipv6.c
+++ b/qca-nss-ecm/frontends/nss/ecm_nss_ported_ipv6.c
@@ -1467,7 +1467,7 @@
 	 * If connection became defunct then set mode so that no further accel/decel attempts occur.
 	 */
 	if (feci->is_defunct) {
-		feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DEFUNCT;
+		feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DEFUNCT_SHORT;
 	}
 	spin_unlock_bh(&feci->lock);
 
diff --git a/qca-nss-ecm/frontends/sfe/ecm_sfe_common.c b/qca-nss-ecm/frontends/sfe/ecm_sfe_common.c
index f7aee0d..6cb1473 100644
--- a/qca-nss-ecm/frontends/sfe/ecm_sfe_common.c
+++ b/qca-nss-ecm/frontends/sfe/ecm_sfe_common.c
@@ -41,6 +41,18 @@
 #include "ecm_sfe_ipv4.h"
 #include "ecm_sfe_ipv6.h"
 #include "ecm_sfe_common.h"
+#include "exports/ecm_sfe_common_public.h"
+
+/*
+ * Callback object to support SFE frontend interaction with external code
+ */
+struct ecm_sfe_common_callbacks ecm_sfe_cb;
+
+/*
+ * Sysctl table
+ */
+static struct ctl_table_header *ecm_sfe_ctl_tbl_hdr;
 
 static bool ecm_sfe_fast_xmit_enable = true;
 
@@ -278,7 +290,8 @@
  */
 int ecm_sfe_sysctl_tbl_init()
 {
-	if (!register_sysctl(ECM_FRONT_END_SYSCTL_PATH, ecm_sfe_sysctl_tbl)) {
+	ecm_sfe_ctl_tbl_hdr = register_sysctl(ECM_FRONT_END_SYSCTL_PATH, ecm_sfe_sysctl_tbl);
+	if (!ecm_sfe_ctl_tbl_hdr) {
 		DEBUG_WARN("Unable to register ecm_sfe_sysctl_tbl");
 		return -EINVAL;
 	}
@@ -287,6 +300,17 @@
 }
 
 /*
+ * ecm_sfe_sysctl_tbl_exit()
+ *	Unregister sysctl for SFE
+ */
+void ecm_sfe_sysctl_tbl_exit(void)
+{
+	if (ecm_sfe_ctl_tbl_hdr) {
+		unregister_sysctl_table(ecm_sfe_ctl_tbl_hdr);
+	}
+}
+
+/*
  * ecm_sfe_common_init_fe_info()
  *	Initialize common fe info
  */
@@ -371,3 +395,119 @@
 		break;
 	}
 }
+
+/*
+ * ecm_sfe_common_tuple_set()
+ *	Sets the SFE common tuple object with the ECM connection rule parameters.
+ *
+ * An external module uses this tuple object to decide whether L2 acceleration is allowed.
+ */
+void ecm_sfe_common_tuple_set(struct ecm_front_end_connection_instance *feci,
+			      int32_t from_iface_id, int32_t to_iface_id,
+			      struct ecm_sfe_common_tuple *tuple)
+{
+	ip_addr_t saddr;
+	ip_addr_t daddr;
+
+	tuple->protocol = ecm_db_connection_protocol_get(feci->ci);
+	tuple->ip_ver = feci->ip_version;
+
+	tuple->src_port = ecm_db_connection_port_get(feci->ci, ECM_DB_OBJ_DIR_FROM);
+	tuple->dest_port = ecm_db_connection_port_get(feci->ci, ECM_DB_OBJ_DIR_TO);
+
+	tuple->src_ifindex = from_iface_id;
+	tuple->dest_ifindex = to_iface_id;
+
+	ecm_db_connection_address_get(feci->ci, ECM_DB_OBJ_DIR_FROM, saddr);
+	ecm_db_connection_address_get(feci->ci, ECM_DB_OBJ_DIR_TO, daddr);
+
+	if (feci->ip_version == 4) {
+		ECM_IP_ADDR_TO_NIN4_ADDR(tuple->src_addr[0], saddr);
+		ECM_IP_ADDR_TO_NIN4_ADDR(tuple->dest_addr[0], daddr);
+	} else {
+		ECM_IP_ADDR_TO_SFE_IPV6_ADDR(tuple->src_addr, saddr);
+		ECM_IP_ADDR_TO_SFE_IPV6_ADDR(tuple->dest_addr, daddr);
+	}
+}
+
+/*
+ * ecm_sfe_common_defunct_ipv4_connection()
+ *	Defunct an IPv4 5-tuple connection.
+ */
+bool ecm_sfe_common_defunct_ipv4_connection(__be32 src_ip, int src_port,
+					    __be32 dest_ip, int dest_port, int protocol)
+{
+	return ecm_db_connection_decel_v4(src_ip, src_port, dest_ip, dest_port, protocol);
+}
+EXPORT_SYMBOL(ecm_sfe_common_defunct_ipv4_connection);
+
+/*
+ * ecm_sfe_common_defunct_ipv6_connection()
+ *	Defunct an IPv6 5-tuple connection.
+ */
+bool ecm_sfe_common_defunct_ipv6_connection(struct in6_addr *src_ip, int src_port,
+					    struct in6_addr *dest_ip, int dest_port, int protocol)
+{
+	return ecm_db_connection_decel_v6(src_ip, src_port, dest_ip, dest_port, protocol);
+}
+EXPORT_SYMBOL(ecm_sfe_common_defunct_ipv6_connection);
+
+/*
+ * ecm_sfe_common_defunct_by_protocol()
+ *	Defunct the connections by the protocol type (e.g., TCP, UDP).
+ */
+void ecm_sfe_common_defunct_by_protocol(int protocol)
+{
+	ecm_db_connection_defunct_by_protocol(protocol);
+}
+EXPORT_SYMBOL(ecm_sfe_common_defunct_by_protocol);
+
+/*
+ * ecm_sfe_common_defunct_by_port()
+ *	Defunct the connections associated with this port, taking the direction
+ * relative to the ECM connection direction into account.
+ *
+ * TODO:
+ *	For now, all the connections from/to this port number are defuncted.
+ *	Directional defunct can be implemented later, but there is a trade-off:
+ *	for each connection in the database, the connection's from/to interfaces
+ *	would have to be checked against the wan_name to determine the direction,
+ *	and the connection defuncted only on a port match. That process may be
+ *	heavier than defuncting all the connections from/to this port number. So,
+ *	the direction and wan_name are optional for this API for now.
+ */
+void ecm_sfe_common_defunct_by_port(int port, int direction, char *wan_name)
+{
+	ecm_db_connection_defunct_by_port(htons(port), ECM_DB_OBJ_DIR_FROM);
+	ecm_db_connection_defunct_by_port(htons(port), ECM_DB_OBJ_DIR_TO);
+}
+EXPORT_SYMBOL(ecm_sfe_common_defunct_by_port);
+
+/*
+ * ecm_sfe_common_callbacks_register()
+ *	Registers SFE common callbacks.
+ */
+int ecm_sfe_common_callbacks_register(struct ecm_sfe_common_callbacks *sfe_cb)
+{
+	if (!sfe_cb || !sfe_cb->l2_accel_check) {
+		DEBUG_ERROR("SFE L2 acceleration check callback is NULL\n");
+		return -EINVAL;
+	}
+
+	rcu_assign_pointer(ecm_sfe_cb.l2_accel_check, sfe_cb->l2_accel_check);
+	synchronize_rcu();
+
+	return 0;
+}
+EXPORT_SYMBOL(ecm_sfe_common_callbacks_register);
+
+/*
+ * ecm_sfe_common_callbacks_unregister()
+ *	Unregisters SFE common callbacks.
+ */
+void ecm_sfe_common_callbacks_unregister(void)
+{
+	rcu_assign_pointer(ecm_sfe_cb.l2_accel_check, NULL);
+	synchronize_rcu();
+}
+EXPORT_SYMBOL(ecm_sfe_common_callbacks_unregister);
diff --git a/qca-nss-ecm/frontends/sfe/ecm_sfe_common.h b/qca-nss-ecm/frontends/sfe/ecm_sfe_common.h
index cb240f9..54625f3 100644
--- a/qca-nss-ecm/frontends/sfe/ecm_sfe_common.h
+++ b/qca-nss-ecm/frontends/sfe/ecm_sfe_common.h
@@ -15,6 +15,13 @@
  * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
  */
 
+#include "ecm_sfe_common_public.h"
+
+/*
+ * Export the callback object for frontend usage.
+ */
+extern struct ecm_sfe_common_callbacks ecm_sfe_cb;
+
 #ifdef CONFIG_XFRM
 /*
  * Which type of ipsec process traffic need.
@@ -148,3 +155,6 @@
 uint32_t ecm_sfe_common_get_stats_bitmap(struct ecm_sfe_common_fe_info *fe_info, ecm_db_obj_dir_t dir);
 void ecm_sfe_common_set_stats_bitmap(struct ecm_sfe_common_fe_info *fe_info, ecm_db_obj_dir_t dir, uint8_t bit);
 void ecm_sfe_common_update_rule(struct ecm_front_end_connection_instance *feci, enum ecm_rule_update_type type, void *arg);
+void ecm_sfe_common_tuple_set(struct ecm_front_end_connection_instance *feci,
+			      int32_t from_iface_id, int32_t to_iface_id,
+			      struct ecm_sfe_common_tuple *tuple);
diff --git a/qca-nss-ecm/frontends/sfe/ecm_sfe_non_ported_ipv4.c b/qca-nss-ecm/frontends/sfe/ecm_sfe_non_ported_ipv4.c
index 9ebc5d4..ab92a8a 100644
--- a/qca-nss-ecm/frontends/sfe/ecm_sfe_non_ported_ipv4.c
+++ b/qca-nss-ecm/frontends/sfe/ecm_sfe_non_ported_ipv4.c
@@ -1101,9 +1101,8 @@
 
 	if (ecm_interface_src_check || ecm_db_connection_is_pppoe_bridged_get(feci->ci)) {
 		DEBUG_INFO("%px: Source interface check flag is enabled\n", nnpci);
-		/*
-		 * TO DO: No interface check rule create message type defined in SFE's API.
-		 */
+		nircm->rule_flags |= SFE_RULE_CREATE_FLAG_FLOW_SRC_INTERFACE_CHECK;
+		nircm->rule_flags |= SFE_RULE_CREATE_FLAG_RETURN_SRC_INTERFACE_CHECK;
 	}
 
 	if (pr->process_actions & ECM_CLASSIFIER_PROCESS_ACTION_QOS_TAG) {
@@ -1461,7 +1460,7 @@
 	 * If connection became defunct then set mode so that no further accel/decel attempts occur.
 	 */
 	if (feci->is_defunct) {
-		feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DEFUNCT;
+		feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DEFUNCT_SHORT;
 	}
 	spin_unlock_bh(&feci->lock);
 
diff --git a/qca-nss-ecm/frontends/sfe/ecm_sfe_non_ported_ipv6.c b/qca-nss-ecm/frontends/sfe/ecm_sfe_non_ported_ipv6.c
index afc33ce..9c99fae 100644
--- a/qca-nss-ecm/frontends/sfe/ecm_sfe_non_ported_ipv6.c
+++ b/qca-nss-ecm/frontends/sfe/ecm_sfe_non_ported_ipv6.c
@@ -1018,6 +1018,8 @@
 	 */
 	if (ecm_interface_src_check || ecm_db_connection_is_pppoe_bridged_get(feci->ci)) {
 		DEBUG_INFO("%px: Source interface check flag is enabled\n", nnpci);
+		nircm->rule_flags |= SFE_RULE_CREATE_FLAG_FLOW_SRC_INTERFACE_CHECK;
+		nircm->rule_flags |= SFE_RULE_CREATE_FLAG_RETURN_SRC_INTERFACE_CHECK;
 	}
 
 	/*
@@ -1340,7 +1342,7 @@
 	 * If connection became defunct then set mode so that no further accel/decel attempts occur.
 	 */
 	if (feci->is_defunct) {
-		feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DEFUNCT;
+		feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DEFUNCT_SHORT;
 	}
 	spin_unlock_bh(&feci->lock);
 
diff --git a/qca-nss-ecm/frontends/sfe/ecm_sfe_ported_ipv4.c b/qca-nss-ecm/frontends/sfe/ecm_sfe_ported_ipv4.c
index 524e118..c3f5d73 100644
--- a/qca-nss-ecm/frontends/sfe/ecm_sfe_ported_ipv4.c
+++ b/qca-nss-ecm/frontends/sfe/ecm_sfe_ported_ipv4.c
@@ -165,6 +165,7 @@
 	struct ecm_front_end_connection_instance *feci;
 	struct ecm_sfe_ported_ipv4_connection_instance *npci;
 	ecm_front_end_acceleration_mode_t result_mode;
+	bool is_defunct = false;
 
 	/*
 	 * Is this a response to a create message?
@@ -327,10 +328,28 @@
 
 	DEBUG_INFO("%px: Decelerate was pending\n", ci);
 
+	/*
+	 * Check if the pending decelerate was initiated by the defunct process.
+	 * If it was, clear the feci's is_defunct flag so the defunct can be re-tried.
+	 */
+	if (feci->is_defunct) {
+		is_defunct = feci->is_defunct;
+		feci->is_defunct = false;
+	}
+
 	spin_unlock_bh(&ecm_sfe_ipv4_lock);
 	spin_unlock_bh(&feci->lock);
 
-	feci->decelerate(feci);
+	/*
+	 * If the pending decelerate came through the defunct process, re-try it
+	 * here via the same defunct path, because that process also removes the
+	 * connection from the database after decelerating it.
+	 */
+	if (is_defunct) {
+		ecm_db_connection_make_defunct(ci);
+	} else {
+		feci->decelerate(feci);
+	}
 
 	/*
 	 * Release the connection.
@@ -354,7 +373,7 @@
 		interface_num = msg->conn_rule.flow_interface_num;
 	}
 	if (ecm_sfe_common_fast_xmit_check(interface_num)) {
-		msg->rule_flags |= SFE_RULE_CREATE_FLAG_FLOW_TRANSMIT_FAST;
+		msg->rule_flags |= SFE_RULE_CREATE_FLAG_RETURN_TRANSMIT_FAST;
 	}
 
 	interface_num = msg->conn_rule.return_top_interface_num;
@@ -362,7 +381,7 @@
 		interface_num = msg->conn_rule.return_interface_num;
 	}
 	if (ecm_sfe_common_fast_xmit_check(interface_num)) {
-		msg->rule_flags |= SFE_RULE_CREATE_FLAG_RETURN_TRANSMIT_FAST;
+		msg->rule_flags |= SFE_RULE_CREATE_FLAG_FLOW_TRANSMIT_FAST;
 	}
 
 	rcu_read_unlock_bh();
@@ -402,6 +421,8 @@
 	uint8_t dest_mac_xlate[ETH_ALEN];
 	ecm_db_direction_t ecm_dir;
 	ecm_front_end_acceleration_mode_t result_mode;
+	uint32_t l2_accel_bits = (ECM_SFE_COMMON_FLOW_L2_ACCEL_ALLOWED | ECM_SFE_COMMON_RETURN_L2_ACCEL_ALLOWED);
+	ecm_sfe_common_l2_accel_check_callback_t l2_accel_check;
 
 	DEBUG_CHECK_MAGIC(npci, ECM_SFE_PORTED_IPV4_CONNECTION_INSTANCE_MAGIC, "%px: magic failed", npci);
 
@@ -488,6 +509,23 @@
 	nircm->conn_rule.return_interface_num = to_sfe_iface_id;
 
 	/*
+	 * Check which side of the connection can support L2 acceleration.
+	 * The check is done only for routed flows and only if the L2 feature is enabled.
+	 */
+	if (sfe_is_l2_feature_enabled() && ecm_db_connection_is_routed_get(feci->ci)) {
+		rcu_read_lock();
+		l2_accel_check = rcu_dereference(ecm_sfe_cb.l2_accel_check);
+		if (l2_accel_check) {
+			struct ecm_sfe_common_tuple l2_accel_tuple;
+
+			ecm_sfe_common_tuple_set(feci, from_sfe_iface_id, to_sfe_iface_id, &l2_accel_tuple);
+
+			l2_accel_bits = l2_accel_check(&l2_accel_tuple);
+		}
+		rcu_read_unlock();
+	}
+
+	/*
 	 * Set interface numbers involved in accelerating this connection.
 	 * These are the inner facing addresses from the heirarchy interface lists we got above.
 	 */
@@ -544,7 +582,7 @@
 				break;
 			}
 
-			if (ecm_sfe_common_is_l2_iface_supported(ECM_DB_IFACE_TYPE_BRIDGE, list_index, from_ifaces_first)) {
+			if (ecm_sfe_common_is_l2_iface_supported(ECM_DB_IFACE_TYPE_BRIDGE, list_index, from_ifaces_first) && (l2_accel_bits & ECM_SFE_COMMON_FLOW_L2_ACCEL_ALLOWED)) {
 				nircm->rule_flags |= SFE_RULE_CREATE_FLAG_USE_FLOW_BOTTOM_INTERFACE;
 				feci->set_stats_bitmap(feci, ECM_DB_OBJ_DIR_FROM, ECM_DB_IFACE_TYPE_BRIDGE);
 			}
@@ -570,7 +608,7 @@
 				break;
 			}
 
-			if (ecm_sfe_common_is_l2_iface_supported(ECM_DB_IFACE_TYPE_OVS_BRIDGE, list_index, from_ifaces_first)) {
+			if (ecm_sfe_common_is_l2_iface_supported(ECM_DB_IFACE_TYPE_OVS_BRIDGE, list_index, from_ifaces_first) && (l2_accel_bits & ECM_SFE_COMMON_FLOW_L2_ACCEL_ALLOWED)) {
 				nircm->rule_flags |= SFE_RULE_CREATE_FLAG_USE_FLOW_BOTTOM_INTERFACE;
 				feci->set_stats_bitmap(feci, ECM_DB_OBJ_DIR_FROM, ECM_DB_IFACE_TYPE_OVS_BRIDGE);
 			}
@@ -623,6 +661,15 @@
 				break;
 			}
 
+			/*
+			 * If the external module decides that L2 acceleration is not allowed,
+			 * return without setting PPPoE parameters.
+			 */
+			if (!(l2_accel_bits & ECM_SFE_COMMON_FLOW_L2_ACCEL_ALLOWED)) {
+				DEBUG_TRACE("%px: L2 acceleration is not allowed for the PPPoE interface\n", npci);
+				break;
+			}
+
 			feci->set_stats_bitmap(feci, ECM_DB_OBJ_DIR_FROM, ECM_DB_IFACE_TYPE_PPPOE);
 
 			/*
@@ -676,7 +723,7 @@
 			}
 			nircm->valid_flags |= SFE_RULE_CREATE_VLAN_VALID;
 
-			if (sfe_is_l2_feature_enabled()) {
+			if (sfe_is_l2_feature_enabled() && (l2_accel_bits & ECM_SFE_COMMON_FLOW_L2_ACCEL_ALLOWED)) {
 				nircm->rule_flags |= SFE_RULE_CREATE_FLAG_USE_FLOW_BOTTOM_INTERFACE;
 				feci->set_stats_bitmap(feci, ECM_DB_OBJ_DIR_FROM, ECM_DB_IFACE_TYPE_VLAN);
 
@@ -703,7 +750,7 @@
 			break;
 		case ECM_DB_IFACE_TYPE_MACVLAN:
 #ifdef ECM_INTERFACE_MACVLAN_ENABLE
-			if (ecm_sfe_common_is_l2_iface_supported(ECM_DB_IFACE_TYPE_MACVLAN, list_index, from_ifaces_first)) {
+			if (ecm_sfe_common_is_l2_iface_supported(ECM_DB_IFACE_TYPE_MACVLAN, list_index, from_ifaces_first) && (l2_accel_bits & ECM_SFE_COMMON_FLOW_L2_ACCEL_ALLOWED)) {
 				nircm->rule_flags |= SFE_RULE_CREATE_FLAG_USE_FLOW_BOTTOM_INTERFACE;
 				feci->set_stats_bitmap(feci, ECM_DB_OBJ_DIR_FROM, ECM_DB_IFACE_TYPE_MACVLAN);
 			}
@@ -740,7 +787,7 @@
 
 		case ECM_DB_IFACE_TYPE_LAG:
 #ifdef ECM_INTERFACE_BOND_ENABLE
-			if (sfe_is_l2_feature_enabled()) {
+			if (sfe_is_l2_feature_enabled() && (l2_accel_bits & ECM_SFE_COMMON_FLOW_L2_ACCEL_ALLOWED)) {
 				nircm->rule_flags |= SFE_RULE_CREATE_FLAG_USE_FLOW_BOTTOM_INTERFACE;
 				/*
 				 * LAG device gets its stats by summing up all stats of its
@@ -822,7 +869,7 @@
 				break;
 			}
 
-			if (ecm_sfe_common_is_l2_iface_supported(ECM_DB_IFACE_TYPE_BRIDGE, list_index, to_ifaces_first)) {
+			if (ecm_sfe_common_is_l2_iface_supported(ECM_DB_IFACE_TYPE_BRIDGE, list_index, to_ifaces_first) && (l2_accel_bits & ECM_SFE_COMMON_RETURN_L2_ACCEL_ALLOWED)) {
 				nircm->rule_flags |= SFE_RULE_CREATE_FLAG_USE_RETURN_BOTTOM_INTERFACE;
 				feci->set_stats_bitmap(feci, ECM_DB_OBJ_DIR_TO, ECM_DB_IFACE_TYPE_BRIDGE);
 			}
@@ -849,7 +896,7 @@
 				break;
 			}
 
-			if (ecm_sfe_common_is_l2_iface_supported(ECM_DB_IFACE_TYPE_OVS_BRIDGE, list_index, to_ifaces_first)) {
+			if (ecm_sfe_common_is_l2_iface_supported(ECM_DB_IFACE_TYPE_OVS_BRIDGE, list_index, to_ifaces_first) && (l2_accel_bits & ECM_SFE_COMMON_RETURN_L2_ACCEL_ALLOWED)) {
 				nircm->rule_flags |= SFE_RULE_CREATE_FLAG_USE_RETURN_BOTTOM_INTERFACE;
 				feci->set_stats_bitmap(feci, ECM_DB_OBJ_DIR_TO, ECM_DB_IFACE_TYPE_OVS_BRIDGE);
 			}
@@ -902,6 +949,15 @@
 				break;
 			}
 
+			/*
+			 * If the external module decides that L2 acceleration is not allowed,
+			 * return without setting PPPoE parameters.
+			 */
+			if (!(l2_accel_bits & ECM_SFE_COMMON_RETURN_L2_ACCEL_ALLOWED)) {
+				DEBUG_TRACE("%px: L2 acceleration is not allowed for the PPPoE interface\n", npci);
+				break;
+			}
+
 			feci->set_stats_bitmap(feci, ECM_DB_OBJ_DIR_TO, ECM_DB_IFACE_TYPE_PPPOE);
 
 			/*
@@ -954,7 +1010,7 @@
 			}
 			nircm->valid_flags |= SFE_RULE_CREATE_VLAN_VALID;
 
-			if (sfe_is_l2_feature_enabled()) {
+			if (sfe_is_l2_feature_enabled() && (l2_accel_bits & ECM_SFE_COMMON_RETURN_L2_ACCEL_ALLOWED)) {
 				nircm->rule_flags |= SFE_RULE_CREATE_FLAG_USE_RETURN_BOTTOM_INTERFACE;
 				feci->set_stats_bitmap(feci, ECM_DB_OBJ_DIR_TO, ECM_DB_IFACE_TYPE_VLAN);
 
@@ -982,7 +1038,7 @@
 
 		case ECM_DB_IFACE_TYPE_MACVLAN:
 #ifdef ECM_INTERFACE_MACVLAN_ENABLE
-			if (ecm_sfe_common_is_l2_iface_supported(ECM_DB_IFACE_TYPE_MACVLAN, list_index, to_ifaces_first)) {
+			if (ecm_sfe_common_is_l2_iface_supported(ECM_DB_IFACE_TYPE_MACVLAN, list_index, to_ifaces_first) && (l2_accel_bits & ECM_SFE_COMMON_RETURN_L2_ACCEL_ALLOWED)) {
 				nircm->rule_flags |= SFE_RULE_CREATE_FLAG_USE_RETURN_BOTTOM_INTERFACE;
 				feci->set_stats_bitmap(feci, ECM_DB_OBJ_DIR_TO, ECM_DB_IFACE_TYPE_MACVLAN);
 			}
@@ -1019,7 +1075,7 @@
 
 		case ECM_DB_IFACE_TYPE_LAG:
 #ifdef ECM_INTERFACE_BOND_ENABLE
-			if (sfe_is_l2_feature_enabled()) {
+			if (sfe_is_l2_feature_enabled() && (l2_accel_bits & ECM_SFE_COMMON_RETURN_L2_ACCEL_ALLOWED)) {
 				nircm->rule_flags |= SFE_RULE_CREATE_FLAG_USE_RETURN_BOTTOM_INTERFACE;
 				/*
 				 * LAG device gets its stats by summing up all stats of its
@@ -1070,6 +1126,29 @@
 		}
 	}
 
+	if (ecm_interface_src_check) {
+		DEBUG_INFO("%px: Source interface check flag is enabled\n", npci);
+		nircm->rule_flags |= SFE_RULE_CREATE_FLAG_FLOW_SRC_INTERFACE_CHECK;
+		nircm->rule_flags |= SFE_RULE_CREATE_FLAG_RETURN_SRC_INTERFACE_CHECK;
+	}
+
+	/*
+	 * Enable the source interface check without flushing the rule, so that the SFE driver
+	 * re-injects the packet into the network stack after the first pass, when the packet
+	 * arrives on the L2 interface. On the second pass the packet reaches SFE on the L3
+	 * interface. If there are more than 3 interfaces in the hierarchy, the packet keeps
+	 * being re-injected into the stack until the flow's input interface matches the
+	 * rule's match_dev.
+	 */
+	if (!(l2_accel_bits & ECM_SFE_COMMON_FLOW_L2_ACCEL_ALLOWED)) {
+		nircm->rule_flags |= SFE_RULE_CREATE_FLAG_FLOW_SRC_INTERFACE_CHECK;
+		nircm->rule_flags |= SFE_RULE_CREATE_FLAG_FLOW_SRC_INTERFACE_CHECK_NO_FLUSH;
+	}
+
+	if (!(l2_accel_bits & ECM_SFE_COMMON_RETURN_L2_ACCEL_ALLOWED)) {
+		nircm->rule_flags |= SFE_RULE_CREATE_FLAG_RETURN_SRC_INTERFACE_CHECK;
+		nircm->rule_flags |= SFE_RULE_CREATE_FLAG_RETURN_SRC_INTERFACE_CHECK_NO_FLUSH;
+	}
+
 	/*
 	 * Set up the flow and return qos tags
 	 */
@@ -1578,7 +1657,7 @@
 	 * If connection became defunct then set mode so that no further accel/decel attempts occur.
 	 */
 	if (feci->is_defunct) {
-		feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DEFUNCT;
+		feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DEFUNCT_SHORT;
 	}
 
 	spin_unlock_bh(&feci->lock);
diff --git a/qca-nss-ecm/frontends/sfe/ecm_sfe_ported_ipv6.c b/qca-nss-ecm/frontends/sfe/ecm_sfe_ported_ipv6.c
index fe0d642..a01ee76 100644
--- a/qca-nss-ecm/frontends/sfe/ecm_sfe_ported_ipv6.c
+++ b/qca-nss-ecm/frontends/sfe/ecm_sfe_ported_ipv6.c
@@ -168,6 +168,7 @@
 	ip_addr_t flow_ip;
 	ip_addr_t return_ip;
 	ecm_front_end_acceleration_mode_t result_mode;
+	bool is_defunct = false;
 
 	/*
 	 * Is this a response to a create message?
@@ -333,10 +334,28 @@
 
 	DEBUG_INFO("%px: Decelerate was pending\n", ci);
 
+	/*
+	 * Check if the pending decelerate was initiated by the defunct process.
+	 * If it was, clear the feci's is_defunct flag so the defunct can be re-tried.
+	 */
+	if (feci->is_defunct) {
+		is_defunct = feci->is_defunct;
+		feci->is_defunct = false;
+	}
+
 	spin_unlock_bh(&ecm_sfe_ipv6_lock);
 	spin_unlock_bh(&feci->lock);
 
-	feci->decelerate(feci);
+	/*
+	 * If the pending decelerate came through the defunct process, re-try it
+	 * here via the same defunct path, because that process also removes the
+	 * connection from the database after decelerating it.
+	 */
+	if (is_defunct) {
+		ecm_db_connection_make_defunct(ci);
+	} else {
+		feci->decelerate(feci);
+	}
 
 	/*
 	 * Release the connection.
@@ -360,7 +379,7 @@
 		interface_num = msg->conn_rule.flow_interface_num;
 	}
 	if (ecm_sfe_common_fast_xmit_check(interface_num)) {
-		msg->rule_flags |= SFE_RULE_CREATE_FLAG_FLOW_TRANSMIT_FAST;
+		msg->rule_flags |= SFE_RULE_CREATE_FLAG_RETURN_TRANSMIT_FAST;
 	}
 
 	interface_num = msg->conn_rule.return_top_interface_num;
@@ -368,7 +387,7 @@
 		interface_num = msg->conn_rule.return_interface_num;
 	}
 	if (ecm_sfe_common_fast_xmit_check(interface_num)) {
-		msg->rule_flags |= SFE_RULE_CREATE_FLAG_RETURN_TRANSMIT_FAST;
+		msg->rule_flags |= SFE_RULE_CREATE_FLAG_FLOW_TRANSMIT_FAST;
 	}
 
 	rcu_read_unlock_bh();
@@ -407,6 +426,8 @@
 	ip_addr_t src_ip;
 	ip_addr_t dest_ip;
 	ecm_front_end_acceleration_mode_t result_mode;
+	uint32_t l2_accel_bits = (ECM_SFE_COMMON_FLOW_L2_ACCEL_ALLOWED | ECM_SFE_COMMON_RETURN_L2_ACCEL_ALLOWED);
+	ecm_sfe_common_l2_accel_check_callback_t l2_accel_check;
 
 	DEBUG_CHECK_MAGIC(npci, ECM_SFE_PORTED_IPV6_CONNECTION_INSTANCE_MAGIC, "%px: magic failed", npci);
 
@@ -493,6 +514,23 @@
 	nircm->conn_rule.return_interface_num = to_sfe_iface_id;
 
 	/*
+	 * Check which side of the connection can support L2 acceleration.
+	 * The check is done only for routed flows and only if the L2 feature is enabled.
+	 */
+	if (sfe_is_l2_feature_enabled() && ecm_db_connection_is_routed_get(feci->ci)) {
+		rcu_read_lock();
+		l2_accel_check = rcu_dereference(ecm_sfe_cb.l2_accel_check);
+		if (l2_accel_check) {
+			struct ecm_sfe_common_tuple l2_accel_tuple;
+
+			ecm_sfe_common_tuple_set(feci, from_sfe_iface_id, to_sfe_iface_id, &l2_accel_tuple);
+
+			l2_accel_bits = l2_accel_check(&l2_accel_tuple);
+		}
+		rcu_read_unlock();
+	}
+
+	/*
 	 * Set interface numbers involved in accelerating this connection.
 	 * These are the inner facing addresses from the heirarchy interface lists we got above.
 	 */
@@ -549,7 +587,7 @@
 				break;
 			}
 
-			if (ecm_sfe_common_is_l2_iface_supported(ECM_DB_IFACE_TYPE_BRIDGE, list_index, from_ifaces_first)) {
+			if (ecm_sfe_common_is_l2_iface_supported(ECM_DB_IFACE_TYPE_BRIDGE, list_index, from_ifaces_first) && (l2_accel_bits & ECM_SFE_COMMON_FLOW_L2_ACCEL_ALLOWED)) {
 				nircm->rule_flags |= SFE_RULE_CREATE_FLAG_USE_FLOW_BOTTOM_INTERFACE;
 				feci->set_stats_bitmap(feci, ECM_DB_OBJ_DIR_FROM, ECM_DB_IFACE_TYPE_BRIDGE);
 			}
@@ -575,7 +613,7 @@
 				break;
 			}
 
-			if (ecm_sfe_common_is_l2_iface_supported(ECM_DB_IFACE_TYPE_OVS_BRIDGE, list_index, from_ifaces_first)) {
+			if (ecm_sfe_common_is_l2_iface_supported(ECM_DB_IFACE_TYPE_OVS_BRIDGE, list_index, from_ifaces_first) && (l2_accel_bits & ECM_SFE_COMMON_FLOW_L2_ACCEL_ALLOWED)) {
 				nircm->rule_flags |= SFE_RULE_CREATE_FLAG_USE_FLOW_BOTTOM_INTERFACE;
 				feci->set_stats_bitmap(feci, ECM_DB_OBJ_DIR_FROM, ECM_DB_IFACE_TYPE_OVS_BRIDGE);
 			}
@@ -628,6 +666,15 @@
 				break;
 			}
 
+			/*
+			 * If the external module decides that L2 acceleration is not allowed,
+			 * return without setting PPPoE parameters.
+			 */
+			if (!(l2_accel_bits & ECM_SFE_COMMON_FLOW_L2_ACCEL_ALLOWED)) {
+				DEBUG_TRACE("%px: L2 acceleration is not allowed for the PPPoE interface\n", npci);
+				break;
+			}
+
 			feci->set_stats_bitmap(feci, ECM_DB_OBJ_DIR_FROM, ECM_DB_IFACE_TYPE_PPPOE);
 
 			/*
@@ -681,7 +728,7 @@
 			}
 			nircm->valid_flags |= SFE_RULE_CREATE_VLAN_VALID;
 
-			if (sfe_is_l2_feature_enabled()) {
+			if (sfe_is_l2_feature_enabled() && (l2_accel_bits & ECM_SFE_COMMON_FLOW_L2_ACCEL_ALLOWED)) {
 				nircm->rule_flags |= SFE_RULE_CREATE_FLAG_USE_FLOW_BOTTOM_INTERFACE;
 				feci->set_stats_bitmap(feci, ECM_DB_OBJ_DIR_FROM, ECM_DB_IFACE_TYPE_VLAN);
 
@@ -709,7 +756,7 @@
 
 		case ECM_DB_IFACE_TYPE_MACVLAN:
 #ifdef ECM_INTERFACE_MACVLAN_ENABLE
-			if (ecm_sfe_common_is_l2_iface_supported(ECM_DB_IFACE_TYPE_MACVLAN, list_index, from_ifaces_first)) {
+			if (ecm_sfe_common_is_l2_iface_supported(ECM_DB_IFACE_TYPE_MACVLAN, list_index, from_ifaces_first) && (l2_accel_bits & ECM_SFE_COMMON_FLOW_L2_ACCEL_ALLOWED)) {
 				nircm->rule_flags |= SFE_RULE_CREATE_FLAG_USE_FLOW_BOTTOM_INTERFACE;
 				feci->set_stats_bitmap(feci, ECM_DB_OBJ_DIR_FROM, ECM_DB_IFACE_TYPE_MACVLAN);
 			}
@@ -746,7 +793,7 @@
 
 		case ECM_DB_IFACE_TYPE_LAG:
 #ifdef ECM_INTERFACE_BOND_ENABLE
-			if (sfe_is_l2_feature_enabled()) {
+			if (sfe_is_l2_feature_enabled() && (l2_accel_bits & ECM_SFE_COMMON_FLOW_L2_ACCEL_ALLOWED)) {
 				nircm->rule_flags |= SFE_RULE_CREATE_FLAG_USE_FLOW_BOTTOM_INTERFACE;
 				/*
 				 * LAG device gets its stats by summing up all stats of its
@@ -828,7 +875,7 @@
 				break;
 			}
 
-			if (ecm_sfe_common_is_l2_iface_supported(ECM_DB_IFACE_TYPE_BRIDGE, list_index, to_ifaces_first)) {
+			if (ecm_sfe_common_is_l2_iface_supported(ECM_DB_IFACE_TYPE_BRIDGE, list_index, to_ifaces_first) && (l2_accel_bits & ECM_SFE_COMMON_RETURN_L2_ACCEL_ALLOWED)) {
 				nircm->rule_flags |= SFE_RULE_CREATE_FLAG_USE_RETURN_BOTTOM_INTERFACE;
 				feci->set_stats_bitmap(feci, ECM_DB_OBJ_DIR_TO, ECM_DB_IFACE_TYPE_BRIDGE);
 			}
@@ -855,7 +902,7 @@
 				break;
 			}
 
-			if (ecm_sfe_common_is_l2_iface_supported(ECM_DB_IFACE_TYPE_OVS_BRIDGE, list_index, to_ifaces_first)) {
+			if (ecm_sfe_common_is_l2_iface_supported(ECM_DB_IFACE_TYPE_OVS_BRIDGE, list_index, to_ifaces_first) && (l2_accel_bits & ECM_SFE_COMMON_RETURN_L2_ACCEL_ALLOWED)) {
 				nircm->rule_flags |= SFE_RULE_CREATE_FLAG_USE_RETURN_BOTTOM_INTERFACE;
 				feci->set_stats_bitmap(feci, ECM_DB_OBJ_DIR_TO, ECM_DB_IFACE_TYPE_OVS_BRIDGE);
 			}
@@ -908,6 +955,15 @@
 				break;
 			}
 
+			/*
+			 * If the external module decides that L2 acceleration is not allowed,
+			 * return without setting PPPoE parameters.
+			 */
+			if (!(l2_accel_bits & ECM_SFE_COMMON_RETURN_L2_ACCEL_ALLOWED)) {
+				DEBUG_TRACE("%px: L2 acceleration is not allowed for the PPPoE interface\n", npci);
+				break;
+			}
+
 			feci->set_stats_bitmap(feci, ECM_DB_OBJ_DIR_FROM, ECM_DB_IFACE_TYPE_PPPOE);
 
 			/*
@@ -960,7 +1016,7 @@
 			}
 			nircm->valid_flags |= SFE_RULE_CREATE_VLAN_VALID;
 
-			if (sfe_is_l2_feature_enabled()) {
+			if (sfe_is_l2_feature_enabled() && (l2_accel_bits & ECM_SFE_COMMON_RETURN_L2_ACCEL_ALLOWED)) {
 				nircm->rule_flags |= SFE_RULE_CREATE_FLAG_USE_RETURN_BOTTOM_INTERFACE;
 				feci->set_stats_bitmap(feci, ECM_DB_OBJ_DIR_TO, ECM_DB_IFACE_TYPE_VLAN);
 
@@ -988,7 +1044,7 @@
 
 		case ECM_DB_IFACE_TYPE_MACVLAN:
 #ifdef ECM_INTERFACE_MACVLAN_ENABLE
-			if (ecm_sfe_common_is_l2_iface_supported(ECM_DB_IFACE_TYPE_MACVLAN, list_index, to_ifaces_first)) {
+			if (ecm_sfe_common_is_l2_iface_supported(ECM_DB_IFACE_TYPE_MACVLAN, list_index, to_ifaces_first) && (l2_accel_bits & ECM_SFE_COMMON_RETURN_L2_ACCEL_ALLOWED)) {
 				nircm->rule_flags |= SFE_RULE_CREATE_FLAG_USE_RETURN_BOTTOM_INTERFACE;
 				feci->set_stats_bitmap(feci, ECM_DB_OBJ_DIR_TO, ECM_DB_IFACE_TYPE_MACVLAN);
 			}
@@ -1025,7 +1081,7 @@
 
 		case ECM_DB_IFACE_TYPE_LAG:
 #ifdef ECM_INTERFACE_BOND_ENABLE
-			if (sfe_is_l2_feature_enabled()) {
+			if (sfe_is_l2_feature_enabled() && (l2_accel_bits & ECM_SFE_COMMON_RETURN_L2_ACCEL_ALLOWED)) {
 				nircm->rule_flags |= SFE_RULE_CREATE_FLAG_USE_RETURN_BOTTOM_INTERFACE;
 				/*
 				 * LAG device gets its stats by summing up all stats of its
@@ -1076,6 +1132,29 @@
 		}
 	}
 
+	if (ecm_interface_src_check) {
+		DEBUG_INFO("%px: Source interface check flag is enabled\n", npci);
+		nircm->rule_flags |= SFE_RULE_CREATE_FLAG_FLOW_SRC_INTERFACE_CHECK;
+		nircm->rule_flags |= SFE_RULE_CREATE_FLAG_RETURN_SRC_INTERFACE_CHECK;
+	}
+
+	/*
+	 * Enable the source interface check without flushing the rule, so that the SFE driver
+	 * re-injects the packet into the network stack after the first pass, when the packet
+	 * arrives on the L2 interface. On the second pass the packet reaches SFE on the L3
+	 * interface. If there are more than 3 interfaces in the hierarchy, the packet keeps
+	 * being re-injected into the stack until the flow's input interface matches the
+	 * rule's match_dev.
+	 */
+	if (!(l2_accel_bits & ECM_SFE_COMMON_FLOW_L2_ACCEL_ALLOWED)) {
+		nircm->rule_flags |= SFE_RULE_CREATE_FLAG_FLOW_SRC_INTERFACE_CHECK;
+		nircm->rule_flags |= SFE_RULE_CREATE_FLAG_FLOW_SRC_INTERFACE_CHECK_NO_FLUSH;
+	}
+
+	if (!(l2_accel_bits & ECM_SFE_COMMON_RETURN_L2_ACCEL_ALLOWED)) {
+		nircm->rule_flags |= SFE_RULE_CREATE_FLAG_RETURN_SRC_INTERFACE_CHECK;
+		nircm->rule_flags |= SFE_RULE_CREATE_FLAG_RETURN_SRC_INTERFACE_CHECK_NO_FLUSH;
+	}
+
 	/*
 	 * Set up the flow and return qos tags
 	 */
@@ -1522,7 +1601,7 @@
 	 * If connection became defunct then set mode so that no further accel/decel attempts occur.
 	 */
 	if (feci->is_defunct) {
-		feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DEFUNCT;
+		feci->accel_mode = ECM_FRONT_END_ACCELERATION_MODE_FAIL_DEFUNCT_SHORT;
 	}
 	spin_unlock_bh(&feci->lock);
 
diff --git a/qca-nss-sfe/exports/sfe_api.h b/qca-nss-sfe/exports/sfe_api.h
index d0d28d0..1bcfedc 100644
--- a/qca-nss-sfe/exports/sfe_api.h
+++ b/qca-nss-sfe/exports/sfe_api.h
@@ -44,9 +44,12 @@
 #define SFE_RULE_CREATE_FLAG_L2_ENCAP     (1<<7) /**< consists of an encapsulating protocol that carries an IPv4 payload within it. */
 #define SFE_RULE_CREATE_FLAG_USE_FLOW_BOTTOM_INTERFACE (1<<8) /**< Use flow interface number instead of top interface. */
 #define SFE_RULE_CREATE_FLAG_USE_RETURN_BOTTOM_INTERFACE (1<<9) /**< Use return interface number instead of top interface. */
-#define SFE_RULE_CREATE_FLAG_SRC_INTERFACE_CHECK  (1<<10)  /**< Check source interface. */
-#define SFE_RULE_CREATE_FLAG_FLOW_TRANSMIT_FAST (1<<11) /**< original flow transmit fast. */
-#define SFE_RULE_CREATE_FLAG_RETURN_TRANSMIT_FAST (1<<12) /**< return flow transmit fast. */
+#define SFE_RULE_CREATE_FLAG_FLOW_SRC_INTERFACE_CHECK  (1<<10)  /**< Check source interface on the flow direction. */
+#define SFE_RULE_CREATE_FLAG_RETURN_SRC_INTERFACE_CHECK  (1<<11)  /**< Check source interface on the return direction. */
+#define SFE_RULE_CREATE_FLAG_FLOW_TRANSMIT_FAST (1<<12) /**< original flow transmit fast. */
+#define SFE_RULE_CREATE_FLAG_RETURN_TRANSMIT_FAST (1<<13) /**< return flow transmit fast. */
+#define SFE_RULE_CREATE_FLAG_FLOW_SRC_INTERFACE_CHECK_NO_FLUSH  (1<<14)  /**< Check source interface on the flow direction but do not flush the connection. */
+#define SFE_RULE_CREATE_FLAG_RETURN_SRC_INTERFACE_CHECK_NO_FLUSH  (1<<15)  /**< Check source interface on the return direction but do not flush the connection. */
 
 /**
  * Rule creation validity flags.
diff --git a/qca-nss-sfe/sfe.c b/qca-nss-sfe/sfe.c
index f665318..0195d41 100644
--- a/qca-nss-sfe/sfe.c
+++ b/qca-nss-sfe/sfe.c
@@ -119,6 +119,15 @@
 
 	int32_t l2_feature_support;		/* L2 feature support */
 
+	/*
+	 * SFE Bypass Mode.
+	 * When enabled, SFE's shortcut path will be bypassed.
+	 * 0: Disabled.
+	 * 1: Bypass all packets.
+	 * 2: Bypass only packets whose fwmark matches bypass_mark.
+	 */
+	int bypass_mode;
+	u32 bypass_mark;
 };
 
 static struct sfe_ctx_instance_internal __sfe_ctx;
@@ -1261,20 +1270,25 @@
  */
 int sfe_recv(struct sk_buff *skb)
 {
+	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
 	struct net_device *dev;
 	struct sfe_l2_info l2_info;
 	int ret;
 
-	/*
-	 * We know that for the vast majority of packets we need the transport
-	 * layer header so we may as well start to fetch it now!
-	 */
-	prefetch(skb->data + 32);
-	barrier();
-
 	dev = skb->dev;
 
 	/*
+	 * Apply SFE Bypass Mode policy.
+	 */
+	if (unlikely(sfe_ctx->bypass_mode == 1)) {
+		return 0;
+	}
+	if (unlikely(sfe_ctx->bypass_mode == 2 && sfe_ctx->bypass_mark &&
+		     skb->mark == sfe_ctx->bypass_mark)) {
+		return 0;
+	}
+
+	/*
 	 * Setting parse flags to 0 since l2_info is passed for non L2.5 header case as well
 	 */
 	l2_info.parse_flags = 0;
@@ -1475,6 +1489,69 @@
 	__ATTR(l2_feature,  0644, sfe_get_l2_feature, sfe_set_l2_feature);
 
 /*
+ * SFE Bypass Mode
+ */
+static ssize_t
+sfe_get_bypass_mode(struct device *dev, struct device_attribute *attr,
+		    char *buf)
+{
+	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
+	return snprintf(buf, (ssize_t)PAGE_SIZE, "%d\n", sfe_ctx->bypass_mode);
+}
+
+static ssize_t
+sfe_set_bypass_mode(struct device *dev, struct device_attribute *attr,
+		    const char *buf, size_t count)
+{
+	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
+	int ret;
+	int bypass_mode;
+
+	ret = kstrtoint(buf, 0, &bypass_mode);
+	if (ret) {
+		return ret;
+	}
+	if (bypass_mode > 2 || bypass_mode < 0) {
+		return -EINVAL;
+	}
+	sfe_ctx->bypass_mode = bypass_mode;
+	return count;
+}
+
+static const struct device_attribute sfe_bypass_mode_attr =
+	__ATTR(bypass_mode, S_IWUSR | S_IRUGO, sfe_get_bypass_mode,
+	       sfe_set_bypass_mode);
+
+static ssize_t
+sfe_get_bypass_mark(struct device *dev, struct device_attribute *attr,
+		    char *buf)
+{
+	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
+	return snprintf(buf, (ssize_t)PAGE_SIZE, "0x%x\n",
+			sfe_ctx->bypass_mark);
+}
+
+static ssize_t
+sfe_set_bypass_mark(struct device *dev, struct device_attribute *attr,
+		    const char *buf, size_t count)
+{
+	struct sfe_ctx_instance_internal *sfe_ctx = &__sfe_ctx;
+	int ret;
+	u32 bypass_mark;
+
+	ret = kstrtou32(buf, 0, &bypass_mark);
+	if (ret) {
+		return ret;
+	}
+	sfe_ctx->bypass_mark = bypass_mark;
+	return count;
+}
+
+static const struct device_attribute sfe_bypass_mark_attr =
+	__ATTR(bypass_mark, S_IWUSR | S_IRUGO, sfe_get_bypass_mark,
+	       sfe_set_bypass_mark);
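+
+/*
+ * Example usage from userspace (illustrative only; the sysfs path assumes the
+ * default "sfe" kobject registered at init):
+ *
+ *	echo 2 > /sys/sfe/bypass_mode		# bypass only fwmark-matched packets
+ *	echo 0x100 > /sys/sfe/bypass_mark
+ *	echo 0 > /sys/sfe/bypass_mode		# restore normal shortcut processing
+ */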
+
+/*
  * sfe_init_if()
  */
 int sfe_init_if(void)
@@ -1511,6 +1588,21 @@
 		goto exit2;
 	}
 
+	result = sysfs_create_file(sfe_ctx->sys_sfe,
+				   &sfe_bypass_mode_attr.attr);
+	if (result) {
+		DEBUG_ERROR("failed to register Bypass Mode sysfs file: %d\n",
+			    result);
+		goto exit2;
+	}
+	result = sysfs_create_file(sfe_ctx->sys_sfe,
+				   &sfe_bypass_mark_attr.attr);
+	if (result) {
+		DEBUG_ERROR("failed to register Bypass Mark sysfs file: %d\n",
+			    result);
+		goto exit2;
+	}
+
 	spin_lock_init(&sfe_ctx->lock);
 
 	INIT_LIST_HEAD(&sfe_ctx->msg_queue);
diff --git a/qca-nss-sfe/sfe_ipv4.c b/qca-nss-sfe/sfe_ipv4.c
index 0598b68..1fd2883 100644
--- a/qca-nss-sfe/sfe_ipv4.c
+++ b/qca-nss-sfe/sfe_ipv4.c
@@ -1121,7 +1121,7 @@
 	/*
 	 * Allocate the various connection tracking objects.
 	 */
-	c = (struct sfe_ipv4_connection *)kmalloc(sizeof(struct sfe_ipv4_connection), GFP_ATOMIC);
+	c = (struct sfe_ipv4_connection *)kzalloc(sizeof(struct sfe_ipv4_connection), GFP_ATOMIC);
 	if (unlikely(!c)) {
 		DEBUG_WARN("%px: memory allocation of connection entry failed\n", msg);
 		this_cpu_inc(si->stats_pcpu->connection_create_failures64);
@@ -1130,7 +1130,7 @@
 		return -ENOMEM;
 	}
 
-	original_cm = (struct sfe_ipv4_connection_match *)kmalloc(sizeof(struct sfe_ipv4_connection_match), GFP_ATOMIC);
+	original_cm = (struct sfe_ipv4_connection_match *)kzalloc(sizeof(struct sfe_ipv4_connection_match), GFP_ATOMIC);
 	if (unlikely(!original_cm)) {
 		DEBUG_WARN("%px: memory allocation of connection match entry failed\n", msg);
 		this_cpu_inc(si->stats_pcpu->connection_create_failures64);
@@ -1140,7 +1140,7 @@
 		return -ENOMEM;
 	}
 
-	reply_cm = (struct sfe_ipv4_connection_match *)kmalloc(sizeof(struct sfe_ipv4_connection_match), GFP_ATOMIC);
+	reply_cm = (struct sfe_ipv4_connection_match *)kzalloc(sizeof(struct sfe_ipv4_connection_match), GFP_ATOMIC);
 	if (unlikely(!reply_cm)) {
 		DEBUG_WARN("%px: memory allocation of connection match entry failed\n", msg);
 		this_cpu_inc(si->stats_pcpu->connection_create_failures64);
@@ -1221,18 +1221,11 @@
 		original_cm->xlate_src_port = 0;
 	}
 
-	atomic_set(&original_cm->rx_packet_count, 0);
-	original_cm->rx_packet_count64 = 0;
-	atomic_set(&original_cm->rx_byte_count, 0);
-	original_cm->rx_byte_count64 = 0;
-
 	original_cm->xmit_dev = dest_dev;
 	original_cm->xmit_dev_mtu = msg->conn_rule.return_mtu;
 
 	original_cm->connection = c;
 	original_cm->counter_match = reply_cm;
-	original_cm->l2_hdr_size = 0;
-	original_cm->flags = 0;
 
 	/*
 	 * UDP Socket is valid only in decap direction.
@@ -1310,12 +1303,13 @@
 		}
 	}
 
-	reply_cm->l2_hdr_size = 0;
-	if (msg->rule_flags & SFE_RULE_CREATE_FLAG_SRC_INTERFACE_CHECK) {
+	if (msg->rule_flags & SFE_RULE_CREATE_FLAG_FLOW_SRC_INTERFACE_CHECK) {
 		original_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK;
 	}
 
-	reply_cm->flags = 0;
+	if (msg->rule_flags & SFE_RULE_CREATE_FLAG_FLOW_SRC_INTERFACE_CHECK_NO_FLUSH) {
+		original_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK_NO_FLUSH;
+	}
 
 	/*
 	 * Adding PPPoE parameters to original and reply entries based on the direction where
@@ -1351,10 +1345,14 @@
 		ether_addr_copy(reply_cm->pppoe_remote_mac, msg->pppoe_rule.return_pppoe_remote_mac);
 	}
 
-	if (msg->rule_flags & SFE_RULE_CREATE_FLAG_SRC_INTERFACE_CHECK) {
+	if (msg->rule_flags & SFE_RULE_CREATE_FLAG_RETURN_SRC_INTERFACE_CHECK) {
 		reply_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK;
 	}
 
+	if (msg->rule_flags & SFE_RULE_CREATE_FLAG_RETURN_SRC_INTERFACE_CHECK_NO_FLUSH) {
+		reply_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK_NO_FLUSH;
+	}
+
 	/*
 	 * For the non-arp interface, we don't write L2 HDR.
 	 */
@@ -1426,11 +1424,6 @@
 		reply_cm->xlate_src_port = 0;
 	}
 
-	atomic_set(&reply_cm->rx_packet_count, 0);
-	reply_cm->rx_packet_count64 = 0;
-	atomic_set(&reply_cm->rx_byte_count, 0);
-	reply_cm->rx_byte_count64 = 0;
-
 	reply_cm->xmit_dev = src_dev;
 	reply_cm->xmit_dev_mtu = msg->conn_rule.flow_mtu;
 
@@ -2512,6 +2505,71 @@
 	__ATTR(stats_work_cpu, S_IWUSR | S_IRUGO, sfe_ipv4_get_cpu, sfe_ipv4_set_cpu);
 
 /*
+ * DSCP rewrite table
+ */
+static ssize_t
+sfe_ipv4_get_dscp_rewrite_mark_to_match(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	struct sfe_ipv4 *si = &__si;
+	return snprintf(buf, (ssize_t)PAGE_SIZE, "0x%x\n",
+			si->dscp_rewrite_mark_to_match);
+}
+
+static ssize_t
+sfe_ipv4_set_dscp_rewrite_mark_to_match(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t size)
+{
+	struct sfe_ipv4 *si = &__si;
+	int ret;
+	u32 mark_to_match;
+
+	ret = kstrtou32(buf, 0, &mark_to_match);
+	if (ret)
+		return ret;
+	si->dscp_rewrite_mark_to_match = mark_to_match;
+	return size;
+}
+
+static const struct device_attribute sfe_ipv4_dscp_rewrite_mark_to_match_attr =
+	__ATTR(dscp_rewrite_mark_to_match, S_IWUSR | S_IRUGO,
+	       sfe_ipv4_get_dscp_rewrite_mark_to_match,
+	       sfe_ipv4_set_dscp_rewrite_mark_to_match);
+
+static ssize_t
+sfe_ipv4_get_dscp_rewrite_dscp_to_set(struct device *dev,
+				      struct device_attribute *attr,
+				      char *buf)
+{
+	struct sfe_ipv4 *si = &__si;
+	return snprintf(buf, (ssize_t)PAGE_SIZE, "0x%x\n",
+			si->dscp_rewrite_dscp_to_set >> SFE_IPV4_DSCP_SHIFT);
+}
+
+static ssize_t
+sfe_ipv4_set_dscp_rewrite_dscp_to_set(struct device *dev,
+				      struct device_attribute *attr,
+				      const char *buf, size_t size)
+{
+	struct sfe_ipv4 *si = &__si;
+	int ret;
+	u32 dscp_to_set;
+
+	ret = kstrtou32(buf, 0, &dscp_to_set);
+	if (ret)
+		return ret;
+	si->dscp_rewrite_dscp_to_set = dscp_to_set << SFE_IPV4_DSCP_SHIFT;
+	return size;
+}
+
+static const struct device_attribute sfe_ipv4_dscp_rewrite_dscp_to_set_attr =
+	__ATTR(dscp_rewrite_dscp_to_set, S_IWUSR | S_IRUGO,
+	       sfe_ipv4_get_dscp_rewrite_dscp_to_set,
+	       sfe_ipv4_set_dscp_rewrite_dscp_to_set);
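+
+/*
+ * Example usage from userspace (illustrative only; the sysfs path assumes the
+ * default "sfe_ipv4" kobject):
+ *
+ *	echo 0x200 > /sys/sfe_ipv4/dscp_rewrite_mark_to_match
+ *	echo 46 > /sys/sfe_ipv4/dscp_rewrite_dscp_to_set	# 46 == EF
+ *
+ * Roughly equivalent in effect to:
+ *	iptables -t mangle -A POSTROUTING -m mark --mark 0x200 -j DSCP --set-dscp 46
+ */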
+
+/*
  * sfe_ipv4_conn_match_hash_init()
  *	Initialize conn match hash lists
  */
@@ -2603,11 +2661,26 @@
 		goto exit3;
 	}
 
+	result = sysfs_create_file(si->sys_ipv4,
+				   &sfe_ipv4_dscp_rewrite_mark_to_match_attr.attr);
+	if (result) {
+		DEBUG_ERROR("failed to register DSCP rewrite mark_to_match file: %d\n",
+			    result);
+		goto exit4;
+	}
+	result = sysfs_create_file(si->sys_ipv4,
+				   &sfe_ipv4_dscp_rewrite_dscp_to_set_attr.attr);
+	if (result) {
+		DEBUG_ERROR("failed to register DSCP rewrite dscp_to_set file: %d\n",
+			    result);
+		goto exit5;
+	}
+
 #ifdef CONFIG_NF_FLOW_COOKIE
 	result = sysfs_create_file(si->sys_ipv4, &sfe_ipv4_flow_cookie_attr.attr);
 	if (result) {
 		DEBUG_ERROR("failed to register flow cookie enable file: %d\n", result);
-		goto exit4;
+		goto exit6;
 	}
 #endif /* CONFIG_NF_FLOW_COOKIE */
 
@@ -2619,7 +2692,7 @@
 #endif
 	if (result < 0) {
 		DEBUG_ERROR("can't register nf local out hook: %d\n", result);
-		goto exit5;
+		goto exit7;
 	}
 	DEBUG_INFO("Register nf local out hook success: %d\n", result);
 #endif
@@ -2629,7 +2702,7 @@
 	result = register_chrdev(0, "sfe_ipv4", &sfe_ipv4_debug_dev_fops);
 	if (result < 0) {
 		DEBUG_ERROR("Failed to register chrdev: %d\n", result);
-		goto exit6;
+		goto exit8;
 	}
 
 	si->debug_dev = result;
@@ -2644,7 +2717,7 @@
 	spin_lock_init(&si->lock);
 	return 0;
 
-exit6:
+exit8:
 #ifdef SFE_PROCESS_LOCAL_OUT
 	DEBUG_TRACE("sfe: Unregister local out hook\n");
 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0))
@@ -2652,13 +2725,19 @@
 #else
 	nf_unregister_net_hooks(&init_net, sfe_ipv4_ops_local_out, ARRAY_SIZE(sfe_ipv4_ops_local_out));
 #endif
-exit5:
+exit7:
 #endif
 #ifdef CONFIG_NF_FLOW_COOKIE
 	sysfs_remove_file(si->sys_ipv4, &sfe_ipv4_flow_cookie_attr.attr);
 
-exit4:
+exit6:
 #endif /* CONFIG_NF_FLOW_COOKIE */
+	sysfs_remove_file(si->sys_ipv4,
+			  &sfe_ipv4_dscp_rewrite_dscp_to_set_attr.attr);
+exit5:
+	sysfs_remove_file(si->sys_ipv4,
+			  &sfe_ipv4_dscp_rewrite_mark_to_match_attr.attr);
+exit4:
 	sysfs_remove_file(si->sys_ipv4, &sfe_ipv4_cpu_attr.attr);
 exit3:
 	sysfs_remove_file(si->sys_ipv4, &sfe_ipv4_debug_dev_attr.attr);
@@ -2702,6 +2781,10 @@
 #ifdef CONFIG_NF_FLOW_COOKIE
 	sysfs_remove_file(si->sys_ipv4, &sfe_ipv4_flow_cookie_attr.attr);
 #endif /* CONFIG_NF_FLOW_COOKIE */
+	sysfs_remove_file(si->sys_ipv4,
+			  &sfe_ipv4_dscp_rewrite_dscp_to_set_attr.attr);
+	sysfs_remove_file(si->sys_ipv4,
+			  &sfe_ipv4_dscp_rewrite_mark_to_match_attr.attr);
 	sysfs_remove_file(si->sys_ipv4, &sfe_ipv4_debug_dev_attr.attr);
 
 	sysfs_remove_file(si->sys_ipv4, &sfe_ipv4_cpu_attr.attr);
diff --git a/qca-nss-sfe/sfe_ipv4.h b/qca-nss-sfe/sfe_ipv4.h
index 48630db..4e8169b 100644
--- a/qca-nss-sfe/sfe_ipv4.h
+++ b/qca-nss-sfe/sfe_ipv4.h
@@ -69,7 +69,7 @@
 #define SFE_IPV4_CONNECTION_MATCH_FLAG_INSERT_EGRESS_VLAN_TAG (1<<12)
 					/* Insert VLAN tag */
 #define SFE_IPV4_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK (1<<13)
-					/* Source interface check.*/
+					/* Source interface check */
 #define SFE_IPV4_CONNECTION_MATCH_FLAG_PASSTHROUGH (1<<14)
 					/* passthrough flow: encap/decap to be skipped for this flow */
 #define SFE_IPV4_CONNECTION_MATCH_FLAG_FAST_XMIT (1<<15)
@@ -78,6 +78,8 @@
 					/* Fast xmit flow checked or not */
 #define SFE_IPV4_CONNECTION_MATCH_FLAG_FAST_XMIT_DEV_ADMISSION (1<<17)
 					/* Fast xmit may be possible for this flow, if SFE check passes */
+#define SFE_IPV4_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK_NO_FLUSH (1<<18)
+					/* Source interface check but do not flush the connection */
 
 /*
  * IPv4 connection matching structure.
@@ -363,6 +365,16 @@
 	struct kobject *sys_ipv4;	/* sysfs linkage */
 	int debug_dev;			/* Major number of the debug char device */
 	u32 debug_read_seq;	/* sequence number for debug dump */
+
+	/*
+	 * DSCP rewrite table
+	 * When `mark_to_match` is non-zero, any packet carrying that skb->mark
+	 * has its flow DSCP policy overridden with the `dscp_to_set` value,
+	 * i.e. roughly equivalent to `iptables -m mark
+	 * --mark <mark_to_match> -j DSCP --set-dscp <dscp_to_set>`.
+	 */
+	u32 dscp_rewrite_mark_to_match;
+	u32 dscp_rewrite_dscp_to_set;
 };
 
 /*
diff --git a/qca-nss-sfe/sfe_ipv4_gre.c b/qca-nss-sfe/sfe_ipv4_gre.c
index 084ea3b..9626a58 100644
--- a/qca-nss-sfe/sfe_ipv4_gre.c
+++ b/qca-nss-sfe/sfe_ipv4_gre.c
@@ -100,19 +100,22 @@
 	 * Source interface validate.
 	 */
 	if (unlikely((cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK) && (cm->match_dev != dev))) {
-		struct sfe_ipv4_connection *c = cm->connection;
-		int ret;
+		if (!(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK_NO_FLUSH)) {
+			struct sfe_ipv4_connection *c = cm->connection;
+			int ret;
 
-		spin_lock_bh(&si->lock);
-		ret = sfe_ipv4_remove_connection(si, c);
-		spin_unlock_bh(&si->lock);
+			DEBUG_TRACE("flush on source interface check failure\n");
+			spin_lock_bh(&si->lock);
+			ret = sfe_ipv4_remove_connection(si, c);
+			spin_unlock_bh(&si->lock);
 
-		if (ret) {
-			sfe_ipv4_flush_connection(si, c, SFE_SYNC_REASON_FLUSH);
+			if (ret) {
+				sfe_ipv4_flush_connection(si, c, SFE_SYNC_REASON_FLUSH);
+			}
 		}
 		rcu_read_unlock();
 		sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_INVALID_SRC_IFACE);
-		DEBUG_TRACE("flush on wrong source interface check failure\n");
+		DEBUG_TRACE("exception the packet on source interface check failure\n");
 		return 0;
 	}
 
diff --git a/qca-nss-sfe/sfe_ipv4_tcp.c b/qca-nss-sfe/sfe_ipv4_tcp.c
index 8de3269..b2d5ec9 100644
--- a/qca-nss-sfe/sfe_ipv4_tcp.c
+++ b/qca-nss-sfe/sfe_ipv4_tcp.c
@@ -194,17 +194,20 @@
 	 * Source interface validate.
 	 */
 	if (unlikely((cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK) && (cm->match_dev != dev))) {
-		struct sfe_ipv4_connection *c = cm->connection;
-		spin_lock_bh(&si->lock);
-		ret = sfe_ipv4_remove_connection(si, c);
-		spin_unlock_bh(&si->lock);
+		if (!(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK_NO_FLUSH)) {
+			struct sfe_ipv4_connection *c = cm->connection;
+			DEBUG_TRACE("flush on source interface check failure\n");
+			spin_lock_bh(&si->lock);
+			ret = sfe_ipv4_remove_connection(si, c);
+			spin_unlock_bh(&si->lock);
 
-		if (ret) {
-			sfe_ipv4_flush_connection(si, c, SFE_SYNC_REASON_FLUSH);
+			if (ret) {
+				sfe_ipv4_flush_connection(si, c, SFE_SYNC_REASON_FLUSH);
+			}
 		}
 		rcu_read_unlock();
 		sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_INVALID_SRC_IFACE);
-		DEBUG_TRACE("flush on wrong source interface check failure\n");
+		DEBUG_TRACE("exception the packet on source interface check failure\n");
 		return 0;
 	}
 
@@ -564,9 +567,26 @@
 	}
 
 	/*
-	 * Update DSCP
+	 * Apply the packet mark.
+	 * A mark already set by the ingress Qdisc takes precedence over
+	 * the flow policy.
 	 */
-	if (unlikely(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_DSCP_REMARK)) {
+	if (likely(skb->mark == 0)) {
+		if (unlikely(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_MARK)) {
+			skb->mark = cm->mark;
+		}
+	}
+
+	/*
+	 * Update DSCP.
+	 * The DSCP rewrite table takes precedence over the flow policy.
+	 */
+	if (unlikely(si->dscp_rewrite_mark_to_match != 0 &&
+		     si->dscp_rewrite_mark_to_match == skb->mark)) {
+		iph->tos = (iph->tos & SFE_IPV4_DSCP_MASK) |
+			si->dscp_rewrite_dscp_to_set;
+	} else if (unlikely(cm->flags &
+			    SFE_IPV4_CONNECTION_MATCH_FLAG_DSCP_REMARK)) {
 		iph->tos = (iph->tos & SFE_IPV4_DSCP_MASK) | cm->dscp;
 	}
 
@@ -686,13 +706,6 @@
 	}
 
 	/*
-	 * Mark outgoing packet
-	 */
-	if (unlikely(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_MARK)) {
-		skb->mark = cm->mark;
-	}
-
-	/*
 	 * For the first packets, check if it could got fast xmit.
 	 */
 	if (unlikely(!(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_FAST_XMIT_FLOW_CHECKED)
diff --git a/qca-nss-sfe/sfe_ipv4_udp.c b/qca-nss-sfe/sfe_ipv4_udp.c
index 1762d74..4b15f7c 100644
--- a/qca-nss-sfe/sfe_ipv4_udp.c
+++ b/qca-nss-sfe/sfe_ipv4_udp.c
@@ -190,17 +190,20 @@
 	 * Source interface validate.
 	 */
 	if (unlikely((cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK) && (cm->match_dev != dev))) {
-		struct sfe_ipv4_connection *c = cm->connection;
-		spin_lock_bh(&si->lock);
-		ret = sfe_ipv4_remove_connection(si, c);
-		spin_unlock_bh(&si->lock);
+		if (!(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK_NO_FLUSH)) {
+			struct sfe_ipv4_connection *c = cm->connection;
+			DEBUG_TRACE("flush on source interface check failure\n");
+			spin_lock_bh(&si->lock);
+			ret = sfe_ipv4_remove_connection(si, c);
+			spin_unlock_bh(&si->lock);
 
-		if (ret) {
-			sfe_ipv4_flush_connection(si, c, SFE_SYNC_REASON_FLUSH);
+			if (ret) {
+				sfe_ipv4_flush_connection(si, c, SFE_SYNC_REASON_FLUSH);
+			}
 		}
 		rcu_read_unlock();
 		sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_INVALID_SRC_IFACE);
-		DEBUG_TRACE("flush on wrong source interface check failure\n");
+		DEBUG_TRACE("exception the packet on source interface check failure\n");
 		return 0;
 	}
 
@@ -470,9 +473,26 @@
 	}
 
 	/*
-	 * Update DSCP
+	 * Apply the packet mark.
+	 * A mark already set by the ingress Qdisc takes precedence over
+	 * the flow policy.
 	 */
-	if (unlikely(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_DSCP_REMARK)) {
+	if (likely(skb->mark == 0)) {
+		if (unlikely(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_MARK)) {
+			skb->mark = cm->mark;
+		}
+	}
+
+	/*
+	 * Update DSCP.
+	 * The DSCP rewrite table takes precedence over the flow policy.
+	 */
+	if (unlikely(si->dscp_rewrite_mark_to_match != 0 &&
+		     si->dscp_rewrite_mark_to_match == skb->mark)) {
+		iph->tos = (iph->tos & SFE_IPV4_DSCP_MASK) |
+			si->dscp_rewrite_dscp_to_set;
+	} else if (unlikely(cm->flags &
+			    SFE_IPV4_CONNECTION_MATCH_FLAG_DSCP_REMARK)) {
 		iph->tos = (iph->tos & SFE_IPV4_DSCP_MASK) | cm->dscp;
 	}
 
@@ -529,13 +549,6 @@
 	}
 
 	/*
-	 * Mark outgoing packet.
-	 */
-	if (unlikely(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_MARK)) {
-		skb->mark = cm->mark;
-	}
-
-	/*
 	 * For the first packets, check whether the flow qualifies for fast xmit.
 	 */
 	if (unlikely(!(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_FAST_XMIT_FLOW_CHECKED)
diff --git a/qca-nss-sfe/sfe_ipv6.c b/qca-nss-sfe/sfe_ipv6.c
index cbd67ec..488f3a4 100644
--- a/qca-nss-sfe/sfe_ipv6.c
+++ b/qca-nss-sfe/sfe_ipv6.c
@@ -159,10 +159,10 @@
 	 */
 	hlist_for_each_entry_rcu(cm, lhead, hnode) {
 		if ((cm->match_dest_port != dest_port) ||
+		    (cm->match_src_port != src_port) ||
 		    (!sfe_ipv6_addr_equal(cm->match_src_ip, src_ip)) ||
 		    (!sfe_ipv6_addr_equal(cm->match_dest_ip, dest_ip)) ||
-		    (cm->match_protocol != protocol) ||
-		    (cm->match_dev != dev)) {
+		    (cm->match_protocol != protocol)) {
 			continue;
 		}
 
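This lookup change removes match_dev from the hash-match predicate and adds match_src_port, so a connection is now found by its full 5-tuple regardless of the ingress device; device validation happens afterwards via the SRC_INTERFACE_CHECK handling in each protocol handler. A sketch of the revised predicate (field and helper names as in this file; not the actual lookup function):

	static inline bool sfe_ipv6_tuple_match(const struct sfe_ipv6_connection_match *cm,
						const struct sfe_ipv6_addr *src_ip,
						const struct sfe_ipv6_addr *dest_ip,
						__be16 src_port, __be16 dest_port,
						u8 protocol)
	{
		return cm->match_dest_port == dest_port &&
		       cm->match_src_port == src_port &&
		       sfe_ipv6_addr_equal(cm->match_src_ip, src_ip) &&
		       sfe_ipv6_addr_equal(cm->match_dest_ip, dest_ip) &&
		       cm->match_protocol == protocol;
	}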
@@ -1129,7 +1129,7 @@
 	/*
 	 * Allocate the various connection tracking objects.
 	 */
-	c = (struct sfe_ipv6_connection *)kmalloc(sizeof(struct sfe_ipv6_connection), GFP_ATOMIC);
+	c = (struct sfe_ipv6_connection *)kzalloc(sizeof(struct sfe_ipv6_connection), GFP_ATOMIC);
 	if (unlikely(!c)) {
 		DEBUG_WARN("%px: memory allocation of connection entry failed\n", msg);
 		this_cpu_inc(si->stats_pcpu->connection_create_failures64);
@@ -1138,7 +1138,7 @@
 		return -ENOMEM;
 	}
 
-	original_cm = (struct sfe_ipv6_connection_match *)kmalloc(sizeof(struct sfe_ipv6_connection_match), GFP_ATOMIC);
+	original_cm = (struct sfe_ipv6_connection_match *)kzalloc(sizeof(struct sfe_ipv6_connection_match), GFP_ATOMIC);
 	if (unlikely(!original_cm)) {
 		this_cpu_inc(si->stats_pcpu->connection_create_failures64);
 		DEBUG_WARN("%px: memory allocation of connection match entry failed\n", msg);
@@ -1148,7 +1148,7 @@
 		return -ENOMEM;
 	}
 
-	reply_cm = (struct sfe_ipv6_connection_match *)kmalloc(sizeof(struct sfe_ipv6_connection_match), GFP_ATOMIC);
+	reply_cm = (struct sfe_ipv6_connection_match *)kzalloc(sizeof(struct sfe_ipv6_connection_match), GFP_ATOMIC);
 	if (unlikely(!reply_cm)) {
 		this_cpu_inc(si->stats_pcpu->connection_create_failures64);
 		DEBUG_WARN("%px: memory allocation of connection match entry failed\n", msg);
@@ -1217,18 +1217,12 @@
 	original_cm->xlate_dest_ip[0] = *(struct sfe_ipv6_addr *)tuple->return_ip;
 	original_cm->xlate_dest_port =  tuple->return_ident;
 
-	atomic_set(&original_cm->rx_packet_count, 0);
-	original_cm->rx_packet_count64 = 0;
-	atomic_set(&original_cm->rx_byte_count, 0);
-	original_cm->rx_byte_count64 = 0;
 	original_cm->xmit_dev = dest_dev;
 
 	original_cm->xmit_dev_mtu = msg->conn_rule.return_mtu;
 
 	original_cm->connection = c;
 	original_cm->counter_match = reply_cm;
-	original_cm->l2_hdr_size = 0;
-	original_cm->flags = 0;
 
 	/*
 	 * Valid in decap direction only
@@ -1307,9 +1301,6 @@
 		}
 	}
 
-	reply_cm->l2_hdr_size = 0;
-	reply_cm->flags = 0;
-
 	/*
 	 * Adding PPPoE parameters to original and reply entries based on the direction where
 	 * PPPoE header is valid in ECM rule.
@@ -1344,10 +1335,14 @@
 		ether_addr_copy(reply_cm->pppoe_remote_mac, msg->pppoe_rule.return_pppoe_remote_mac);
 	}
 
-	if (msg->rule_flags & SFE_RULE_CREATE_FLAG_SRC_INTERFACE_CHECK) {
+	if (msg->rule_flags & SFE_RULE_CREATE_FLAG_FLOW_SRC_INTERFACE_CHECK) {
 		original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK;
 	}
 
+	if (msg->rule_flags & SFE_RULE_CREATE_FLAG_FLOW_SRC_INTERFACE_CHECK_NO_FLUSH) {
+		original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK_NO_FLUSH;
+	}
+
 	/*
 	 * For non-ARP interfaces we don't write the L2 header,
 	 * excluding PPPoE, since PPPoE encap/decap is now supported.
@@ -1406,10 +1401,6 @@
 		reply_cm->match_src_port = tuple->return_ident;
 	}
 
-	atomic_set(&original_cm->rx_byte_count, 0);
-	reply_cm->rx_packet_count64 = 0;
-	atomic_set(&reply_cm->rx_byte_count, 0);
-	reply_cm->rx_byte_count64 = 0;
 	reply_cm->xmit_dev = src_dev;
 	reply_cm->xmit_dev_mtu = msg->conn_rule.flow_mtu;
 
@@ -1606,10 +1597,14 @@
 		}
 	}
 
-	if (msg->rule_flags & SFE_RULE_CREATE_FLAG_SRC_INTERFACE_CHECK) {
+	if (msg->rule_flags & SFE_RULE_CREATE_FLAG_RETURN_SRC_INTERFACE_CHECK) {
 		reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK;
 	}
 
+	if (msg->rule_flags & SFE_RULE_CREATE_FLAG_RETURN_SRC_INTERFACE_CHECK_NO_FLUSH) {
+		reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK_NO_FLUSH;
+	}
+
 	/*
 	 * For non-ARP interfaces we don't write the L2 header,
 	 * excluding PPPoE, since PPPoE encap/decap is now supported.
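The single SFE_RULE_CREATE_FLAG_SRC_INTERFACE_CHECK rule flag is likewise split per direction: the FLOW_* variants program the original (flow-direction) match entry in the earlier hunk, and the RETURN_* variants program the reply entry here, each with an optional NO_FLUSH modifier. Collected in one place (flag names from this patch):

	if (msg->rule_flags & SFE_RULE_CREATE_FLAG_FLOW_SRC_INTERFACE_CHECK)
		original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK;
	if (msg->rule_flags & SFE_RULE_CREATE_FLAG_FLOW_SRC_INTERFACE_CHECK_NO_FLUSH)
		original_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK_NO_FLUSH;

	if (msg->rule_flags & SFE_RULE_CREATE_FLAG_RETURN_SRC_INTERFACE_CHECK)
		reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK;
	if (msg->rule_flags & SFE_RULE_CREATE_FLAG_RETURN_SRC_INTERFACE_CHECK_NO_FLUSH)
		reply_cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK_NO_FLUSH;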
@@ -2489,7 +2484,72 @@
 static const struct device_attribute sfe_ipv6_cpu_attr =
 	__ATTR(stat_work_cpu, S_IWUSR | S_IRUGO, sfe_ipv6_get_cpu, sfe_ipv6_set_cpu);
 
- /*
+/*
+ * DSCP rewrite table
+ */
+static ssize_t
+sfe_ipv6_get_dscp_rewrite_mark_to_match(struct device *dev,
+					struct device_attribute *attr,
+					char *buf)
+{
+	struct sfe_ipv6 *si = &__si6;
+	return snprintf(buf, (ssize_t)PAGE_SIZE, "0x%x\n",
+			si->dscp_rewrite_mark_to_match);
+}
+
+static ssize_t
+sfe_ipv6_set_dscp_rewrite_mark_to_match(struct device *dev,
+					struct device_attribute *attr,
+					const char *buf, size_t size)
+{
+	struct sfe_ipv6 *si = &__si6;
+	int ret;
+	u32 mark_to_match;
+
+	ret = kstrtou32(buf, 0, &mark_to_match);
+	if (ret)
+		return ret;
+	si->dscp_rewrite_mark_to_match = mark_to_match;
+	return size;
+}
+
+static const struct device_attribute sfe_ipv6_dscp_rewrite_mark_to_match_attr =
+	__ATTR(dscp_rewrite_mark_to_match, S_IWUSR | S_IRUGO,
+	       sfe_ipv6_get_dscp_rewrite_mark_to_match,
+	       sfe_ipv6_set_dscp_rewrite_mark_to_match);
+
+static ssize_t
+sfe_ipv6_get_dscp_rewrite_dscp_to_set(struct device *dev,
+				      struct device_attribute *attr,
+				      char *buf)
+{
+	struct sfe_ipv6 *si = &__si6;
+	return snprintf(buf, (ssize_t)PAGE_SIZE, "0x%x\n",
+			si->dscp_rewrite_dscp_to_set >> SFE_IPV6_DSCP_SHIFT);
+}
+
+static ssize_t
+sfe_ipv6_set_dscp_rewrite_dscp_to_set(struct device *dev,
+				      struct device_attribute *attr,
+				      const char *buf, size_t size)
+{
+	struct sfe_ipv6 *si = &__si6;
+	int ret;
+	u32 dscp_to_set;
+
+	ret = kstrtou32(buf, 0, &dscp_to_set);
+	if (ret)
+		return ret;
+	si->dscp_rewrite_dscp_to_set = dscp_to_set << SFE_IPV6_DSCP_SHIFT;
+	return size;
+}
+
+static const struct device_attribute sfe_ipv6_dscp_rewrite_dscp_to_set_attr =
+	__ATTR(dscp_rewrite_dscp_to_set, S_IWUSR | S_IRUGO,
+	       sfe_ipv6_get_dscp_rewrite_dscp_to_set,
+	       sfe_ipv6_set_dscp_rewrite_dscp_to_set);
+
+/*
  * sfe_ipv6_hash_init()
  *	Initialize conn match hash lists
  */
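The new attributes follow the standard sysfs show/store pattern: kstrtou32() with base 0 accepts decimal or 0x-prefixed input, a mark_to_match of zero leaves the rewrite disabled (the fast path tests it for non-zero), and dscp_to_set is stored pre-shifted by SFE_IPV6_DSCP_SHIFT so the per-packet path can apply it without shifting. A sketch of the value round-trip under those assumptions:

	u32 user_dscp = 46;				/* e.g. EF, as written via sysfs */
	u32 stored = user_dscp << SFE_IPV6_DSCP_SHIFT;	/* si->dscp_rewrite_dscp_to_set */
	/*
	 * The fast path applies `stored` directly: sfe_ipv6_change_dsfield(iph, stored);
	 * the show handler prints stored >> SFE_IPV6_DSCP_SHIFT, i.e. 46 again.
	 */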
@@ -2583,11 +2643,26 @@
 		goto exit3;
 	}
 
+	result = sysfs_create_file(si->sys_ipv6,
+				   &sfe_ipv6_dscp_rewrite_mark_to_match_attr.attr);
+	if (result) {
+		DEBUG_ERROR("failed to register DSCP rewrite mark_to_match file: %d\n",
+			    result);
+		goto exit4;
+	}
+	result = sysfs_create_file(si->sys_ipv6,
+				   &sfe_ipv6_dscp_rewrite_dscp_to_set_attr.attr);
+	if (result) {
+		DEBUG_ERROR("failed to register DSCP rewrite dscp_to_set file: %d\n",
+			    result);
+		goto exit5;
+	}
+
 #ifdef CONFIG_NF_FLOW_COOKIE
 	result = sysfs_create_file(si->sys_ipv6, &sfe_ipv6_flow_cookie_attr.attr);
 	if (result) {
 		DEBUG_ERROR("failed to register flow cookie enable file: %d\n", result);
-		goto exit4;
+		goto exit6;
 	}
 #endif /* CONFIG_NF_FLOW_COOKIE */
 
@@ -2597,13 +2672,13 @@
 #else
 	result = nf_register_net_hooks(&init_net, sfe_ipv6_ops_local_out, ARRAY_SIZE(sfe_ipv6_ops_local_out));
 #endif
-#endif
 	if (result < 0) {
 		DEBUG_ERROR("can't register nf local out hook: %d\n", result);
-		goto exit5;
+		goto exit7;
 	} else {
 		DEBUG_ERROR("Register nf local out hook success: %d\n", result);
 	}
+#endif
 
 	/*
 	 * Register our debug char device.
@@ -2611,7 +2686,7 @@
 	result = register_chrdev(0, "sfe_ipv6", &sfe_ipv6_debug_dev_fops);
 	if (result < 0) {
 		DEBUG_ERROR("Failed to register chrdev: %d\n", result);
-		goto exit6;
+		goto exit8;
 	}
 
 	si->debug_dev = result;
@@ -2626,7 +2701,7 @@
 
 	return 0;
 
-exit6:
+exit8:
 #ifdef SFE_PROCESS_LOCAL_OUT
 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0))
 	DEBUG_TRACE("sfe: Unregister local out hook\n");
@@ -2635,16 +2710,21 @@
 	DEBUG_TRACE("sfe: Unregister local out hook\n");
 	nf_unregister_net_hooks(&init_net, sfe_ipv6_ops_local_out, ARRAY_SIZE(sfe_ipv6_ops_local_out));
 #endif
+exit7:
 #endif
 
-exit5:
 #ifdef CONFIG_NF_FLOW_COOKIE
 	sysfs_remove_file(si->sys_ipv6, &sfe_ipv6_flow_cookie_attr.attr);
 
-exit4:
+exit6:
 #endif /* CONFIG_NF_FLOW_COOKIE */
+	sysfs_remove_file(si->sys_ipv6,
+			  &sfe_ipv6_dscp_rewrite_dscp_to_set_attr.attr);
+exit5:
+	sysfs_remove_file(si->sys_ipv6,
+			  &sfe_ipv6_dscp_rewrite_mark_to_match_attr.attr);
+exit4:
 	sysfs_remove_file(si->sys_ipv6, &sfe_ipv6_cpu_attr.attr);
-
 exit3:
 	sysfs_remove_file(si->sys_ipv6, &sfe_ipv6_debug_dev_attr.attr);
 
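Two extra sysfs files mean two extra unwind points, so the exit labels are renumbered to keep the error path a strict LIFO mirror of the registrations. The relocated #endif is a real fix as well: previously the nf local-out result check compiled even without SFE_PROCESS_LOCAL_OUT and tested a stale result left over from the preceding sysfs call. The label pattern, reduced to a hypothetical two-attribute example:

	static int example_init(struct kobject *kobj)
	{
		int ret;

		ret = sysfs_create_file(kobj, &attr_a.attr);
		if (ret)
			goto exit1;

		ret = sysfs_create_file(kobj, &attr_b.attr);
		if (ret)
			goto exit2;

		return 0;

	exit2:
		sysfs_remove_file(kobj, &attr_a.attr);	/* undo in reverse order */
	exit1:
		return ret;
	}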
@@ -2691,7 +2771,10 @@
 #ifdef CONFIG_NF_FLOW_COOKIE
 	sysfs_remove_file(si->sys_ipv6, &sfe_ipv6_flow_cookie_attr.attr);
 #endif /* CONFIG_NF_FLOW_COOKIE */
-
+	sysfs_remove_file(si->sys_ipv6,
+			  &sfe_ipv6_dscp_rewrite_dscp_to_set_attr.attr);
+	sysfs_remove_file(si->sys_ipv6,
+			  &sfe_ipv6_dscp_rewrite_mark_to_match_attr.attr);
 	sysfs_remove_file(si->sys_ipv6, &sfe_ipv6_cpu_attr.attr);
 
 	sysfs_remove_file(si->sys_ipv6, &sfe_ipv6_debug_dev_attr.attr);
diff --git a/qca-nss-sfe/sfe_ipv6.h b/qca-nss-sfe/sfe_ipv6.h
index 2aa9f41..9c78f1c 100644
--- a/qca-nss-sfe/sfe_ipv6.h
+++ b/qca-nss-sfe/sfe_ipv6.h
@@ -82,7 +82,7 @@
 #define SFE_IPV6_CONNECTION_MATCH_FLAG_INSERT_EGRESS_VLAN_TAG (1<<12)
 					/* Insert VLAN tag */
 #define SFE_IPV6_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK (1<<13)
-					/* Source interface check.*/
+					/* Source interface check */
 #define SFE_IPV6_CONNECTION_MATCH_FLAG_PASSTHROUGH (1<<14)
 					/* passthrough flow: encap/decap to be skipped for this flow */
 #define SFE_IPV6_CONNECTION_MATCH_FLAG_FAST_XMIT (1<<15)
@@ -91,6 +91,8 @@
 					/* fast xmit checked or not */
 #define SFE_IPV6_CONNECTION_MATCH_FLAG_FAST_XMIT_DEV_ADMISSION (1<<17)
 					/* Fast xmit may be possible for this flow, if SFE check passes */
+#define SFE_IPV6_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK_NO_FLUSH (1<<18)
+					/* Source interface check but do not flush the connection */
 
 /*
  * IPv6 connection matching structure.
@@ -381,6 +383,16 @@
 	struct kobject *sys_ipv6;	/* sysfs linkage */
 	int debug_dev;			/* Major number of the debug char device */
 	u32 debug_read_seq;		/* sequence number for debug dump */
+
+	/*
+	 * DSCP rewrite table
+	 * When `mark_to_match` is non-zero, any packet whose skb->mark
+	 * equals it has its DSCP rewritten to `dscp_to_set`, overriding
+	 * the flow DSCP policy; roughly equivalent to `ip6tables -m mark
+	 * --mark <mark_to_match> -j DSCP --set-dscp <dscp_to_set>`.
+	 */
+	u32 dscp_rewrite_mark_to_match;
+	u32 dscp_rewrite_dscp_to_set;
 };
 
 /*
diff --git a/qca-nss-sfe/sfe_ipv6_gre.c b/qca-nss-sfe/sfe_ipv6_gre.c
index 361c23a..8a48b3f 100644
--- a/qca-nss-sfe/sfe_ipv6_gre.c
+++ b/qca-nss-sfe/sfe_ipv6_gre.c
@@ -99,18 +99,21 @@
 	 * Source interface validate.
 	 */
 	if (unlikely((cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK) && (cm->match_dev != dev))) {
-		struct sfe_ipv6_connection *c = cm->connection;
-		int ret;
-		spin_lock_bh(&si->lock);
-		ret = sfe_ipv6_remove_connection(si, c);
-		spin_unlock_bh(&si->lock);
+		if (!(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK_NO_FLUSH)) {
+			struct sfe_ipv6_connection *c = cm->connection;
+			int ret;
+			DEBUG_TRACE("flush on source interface check failure\n");
+			spin_lock_bh(&si->lock);
+			ret = sfe_ipv6_remove_connection(si, c);
+			spin_unlock_bh(&si->lock);
 
-		if (ret) {
-			sfe_ipv6_flush_connection(si, c, SFE_SYNC_REASON_FLUSH);
+			if (ret) {
+				sfe_ipv6_flush_connection(si, c, SFE_SYNC_REASON_FLUSH);
+			}
 		}
 		rcu_read_unlock();
 		sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_INVALID_SRC_IFACE);
-		DEBUG_TRACE("flush on wrong source interface check failure\n");
+		DEBUG_TRACE("exception the packet: source interface check failed\n");
 		return 0;
 	}
 
diff --git a/qca-nss-sfe/sfe_ipv6_tcp.c b/qca-nss-sfe/sfe_ipv6_tcp.c
index 6ccc8c7..6ba30b3 100644
--- a/qca-nss-sfe/sfe_ipv6_tcp.c
+++ b/qca-nss-sfe/sfe_ipv6_tcp.c
@@ -196,17 +196,20 @@
 	 * Source interface validate.
 	 */
 	if (unlikely((cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK) && (cm->match_dev != dev))) {
-		struct sfe_ipv6_connection *c = cm->connection;
-		spin_lock_bh(&si->lock);
-		ret = sfe_ipv6_remove_connection(si, c);
-		spin_unlock_bh(&si->lock);
+		if (!(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK_NO_FLUSH)) {
+			struct sfe_ipv6_connection *c = cm->connection;
+			DEBUG_TRACE("flush on source interface check failure\n");
+			spin_lock_bh(&si->lock);
+			ret = sfe_ipv6_remove_connection(si, c);
+			spin_unlock_bh(&si->lock);
 
-		if (ret) {
-			sfe_ipv6_flush_connection(si, c, SFE_SYNC_REASON_FLUSH);
+			if (ret) {
+				sfe_ipv6_flush_connection(si, c, SFE_SYNC_REASON_FLUSH);
+			}
 		}
 		rcu_read_unlock();
 		sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_INVALID_SRC_IFACE);
-		DEBUG_TRACE("flush on wrong source interface check failure\n");
+		DEBUG_TRACE("exception the packet: source interface check failed\n");
 		return 0;
 	}
 
@@ -575,9 +578,25 @@
 	}
 
 	/*
-	 * Update DSCP
+	 * Apply the packet mark.
+	 * A mark already set by the ingress Qdisc takes precedence over
+	 * the flow policy mark.
 	 */
-	if (unlikely(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_DSCP_REMARK)) {
+	if (likely(skb->mark == 0)) {
+		if (unlikely(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_MARK)) {
+			skb->mark = cm->mark;
+		}
+	}
+
+	/*
+	 * Update DSCP
+	 * DSCP rewrite table takes precedence over flow policy.
+	 */
+	if (unlikely(si->dscp_rewrite_mark_to_match != 0 &&
+		     si->dscp_rewrite_mark_to_match == skb->mark)) {
+		sfe_ipv6_change_dsfield(iph, si->dscp_rewrite_dscp_to_set);
+	} else if (unlikely(cm->flags &
+			    SFE_IPV6_CONNECTION_MATCH_FLAG_DSCP_REMARK)) {
 		sfe_ipv6_change_dsfield(iph, cm->dscp);
 	}
 
@@ -691,13 +710,6 @@
 	}
 
 	/*
-	 * Mark outgoing packet
-	 */
-	if (unlikely(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_MARK)) {
-		skb->mark = cm->mark;
-	}
-
-	/*
 	 * For the first packets, check whether the flow qualifies for fast xmit.
 	 */
 	if (unlikely(!(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_FAST_XMIT_FLOW_CHECKED)
diff --git a/qca-nss-sfe/sfe_ipv6_udp.c b/qca-nss-sfe/sfe_ipv6_udp.c
index f34c6ee..445b43f 100644
--- a/qca-nss-sfe/sfe_ipv6_udp.c
+++ b/qca-nss-sfe/sfe_ipv6_udp.c
@@ -206,17 +206,20 @@
 	 * Source interface validate.
 	 */
 	if (unlikely((cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK) && (cm->match_dev != dev))) {
-		struct sfe_ipv6_connection *c = cm->connection;
-		spin_lock_bh(&si->lock);
-		ret = sfe_ipv6_remove_connection(si, c);
-		spin_unlock_bh(&si->lock);
+		if (!(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK_NO_FLUSH)) {
+			struct sfe_ipv6_connection *c = cm->connection;
+			DEBUG_TRACE("flush on source interface check failure\n");
+			spin_lock_bh(&si->lock);
+			ret = sfe_ipv6_remove_connection(si, c);
+			spin_unlock_bh(&si->lock);
 
-		if (ret) {
-			sfe_ipv6_flush_connection(si, c, SFE_SYNC_REASON_FLUSH);
+			if (ret) {
+				sfe_ipv6_flush_connection(si, c, SFE_SYNC_REASON_FLUSH);
+			}
 		}
 		rcu_read_unlock();
 		sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_INVALID_SRC_IFACE);
-		DEBUG_TRACE("flush on wrong source interface check failure\n");
+		DEBUG_TRACE("exception the packet: source interface check failed\n");
 		return 0;
 	}
 
@@ -400,9 +403,25 @@
 	}
 
 	/*
-	 * Update DSCP
+	 * Apply the packet mark.
+	 * A mark already set by the ingress Qdisc takes precedence over
+	 * the flow policy mark.
 	 */
-	if (unlikely(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_DSCP_REMARK)) {
+	if (likely(skb->mark == 0)) {
+		if (unlikely(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_MARK)) {
+			skb->mark = cm->mark;
+		}
+	}
+
+	/*
+	 * Update DSCP
+	 * DSCP rewrite table takes precedence over flow policy.
+	 */
+	if (unlikely(si->dscp_rewrite_mark_to_match != 0 &&
+		     si->dscp_rewrite_mark_to_match == skb->mark)) {
+		sfe_ipv6_change_dsfield(iph, si->dscp_rewrite_dscp_to_set);
+	} else if (unlikely(cm->flags &
+			    SFE_IPV6_CONNECTION_MATCH_FLAG_DSCP_REMARK)) {
 		sfe_ipv6_change_dsfield(iph, cm->dscp);
 	}
 
@@ -523,13 +542,6 @@
 	}
 
 	/*
-	 * Mark outgoing packet.
-	 */
-	if (unlikely(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_MARK)) {
-		skb->mark = cm->mark;
-	}
-
-	/*
 	 * For the first packets, check whether the flow qualifies for fast xmit.
 	 */
 	if (unlikely(!(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_FAST_XMIT_FLOW_CHECKED)