Project import generated by Copybara.

GitOrigin-RevId: 82bae8264881dc9bc98b8b3954e5915a12817633
diff --git a/0001-Revert-SFE-ESP-changes.patch b/0001-Revert-SFE-ESP-changes.patch
deleted file mode 100644
index d824011..0000000
--- a/0001-Revert-SFE-ESP-changes.patch
+++ /dev/null
@@ -1,876 +0,0 @@
-From 466a71a2b97c575ebc0cab4e2af94b00e83c2218 Mon Sep 17 00:00:00 2001
-From: Siva Eluri <eluris@google.com>
-Date: Fri, 3 Nov 2023 12:05:09 -0700
-Subject: [PATCH] Revert SFE ESP changes
-
-Change-Id: Ib5816fc4cc80ac8d1b6219d35871298c7bd5b409
----
- qca-nss-sfe/Makefile       |   4 +-
- qca-nss-sfe/sfe.c          |   6 -
- qca-nss-sfe/sfe_ipv4.c     |  31 +---
- qca-nss-sfe/sfe_ipv4.h     |   4 -
- qca-nss-sfe/sfe_ipv4_esp.c | 295 -------------------------------------
- qca-nss-sfe/sfe_ipv4_esp.h |  21 ---
- qca-nss-sfe/sfe_ipv6.c     |  29 +---
- qca-nss-sfe/sfe_ipv6.h     |   4 -
- qca-nss-sfe/sfe_ipv6_esp.c | 275 ----------------------------------
- qca-nss-sfe/sfe_ipv6_esp.h |  21 ---
- 10 files changed, 5 insertions(+), 685 deletions(-)
- delete mode 100644 qca-nss-sfe/sfe_ipv4_esp.c
- delete mode 100644 qca-nss-sfe/sfe_ipv4_esp.h
- delete mode 100644 qca-nss-sfe/sfe_ipv6_esp.c
- delete mode 100644 qca-nss-sfe/sfe_ipv6_esp.h
-
-diff --git a/qca-nss-sfe/Makefile b/qca-nss-sfe/Makefile
-index 6ac42d3..d52146c 100644
---- a/qca-nss-sfe/Makefile
-+++ b/qca-nss-sfe/Makefile
-@@ -5,8 +5,8 @@
- KERNELVERSION := $(word 1, $(subst ., ,$(KERNELVERSION))).$(word 2, $(subst ., ,$(KERNELVERSION)))
- 
- SFE_BASE_OBJS := sfe.o sfe_init.o
--SFE_IPV4_OBJS := sfe_ipv4.o sfe_ipv4_udp.o sfe_ipv4_tcp.o sfe_ipv4_icmp.o sfe_ipv4_esp.o
--SFE_IPV6_OBJS := sfe_ipv6.o sfe_ipv6_udp.o sfe_ipv6_tcp.o sfe_ipv6_icmp.o sfe_ipv6_tunipip6.o sfe_ipv6_esp.o
-+SFE_IPV4_OBJS := sfe_ipv4.o sfe_ipv4_udp.o sfe_ipv4_tcp.o sfe_ipv4_icmp.o
-+SFE_IPV6_OBJS := sfe_ipv6.o sfe_ipv6_udp.o sfe_ipv6_tcp.o sfe_ipv6_icmp.o sfe_ipv6_tunipip6.o
- SFE_PPPOE_OBJS := sfe_pppoe.o
- 
- 
-diff --git a/qca-nss-sfe/sfe.c b/qca-nss-sfe/sfe.c
-index 8bc387c..b352e9a 100644
---- a/qca-nss-sfe/sfe.c
-+++ b/qca-nss-sfe/sfe.c
-@@ -680,9 +680,6 @@ sfe_tx_status_t sfe_create_ipv4_rule_msg(struct sfe_ctx_instance_internal *sfe_c
- 	case IPPROTO_GRE:
- 		break;
- 
--	case IPPROTO_ESP:
--		break;
--
- 	default:
- 		ret = SFE_CMN_RESPONSE_EMSG;
- 		sfe_incr_exceptions(SFE_EXCEPTION_PROTOCOL_NOT_SUPPORT);
-@@ -1051,9 +1048,6 @@ sfe_tx_status_t sfe_create_ipv6_rule_msg(struct sfe_ctx_instance_internal *sfe_c
- 	case IPPROTO_GRE:
- 		break;
- 
--	case IPPROTO_ESP:
--		break;
--
- 	default:
- 		ret = SFE_CMN_RESPONSE_EMSG;
- 		sfe_incr_exceptions(SFE_EXCEPTION_PROTOCOL_NOT_SUPPORT);
-diff --git a/qca-nss-sfe/sfe_ipv4.c b/qca-nss-sfe/sfe_ipv4.c
-index 48eec72..aeb9a42 100644
---- a/qca-nss-sfe/sfe_ipv4.c
-+++ b/qca-nss-sfe/sfe_ipv4.c
-@@ -45,7 +45,6 @@
- #include "sfe_ipv4_icmp.h"
- #include "sfe_pppoe.h"
- #include "sfe_ipv4_gre.h"
--#include "sfe_ipv4_esp.h"
- 
- static char *sfe_ipv4_exception_events_string[SFE_IPV4_EXCEPTION_EVENT_LAST] = {
- 	"UDP_HEADER_INCOMPLETE",
-@@ -96,10 +95,6 @@ static char *sfe_ipv4_exception_events_string[SFE_IPV4_EXCEPTION_EVENT_LAST] = {
- 	"GRE_IP_OPTIONS_OR_INITIAL_FRAGMENT",
- 	"GRE_SMALL_TTL",
- 	"GRE_NEEDS_FRAGMENTATION",
--	"ESP_NO_CONNECTION",
--	"ESP_IP_OPTIONS_OR_INITIAL_FRAGMENT",
--	"ESP_NEEDS_FRAGMENTATION",
--	"ESP_SMALL_TTL"
- };
- 
- static struct sfe_ipv4 __si;
-@@ -876,10 +871,6 @@ int sfe_ipv4_recv(struct net_device *dev, struct sk_buff *skb, struct sfe_l2_inf
- 		return sfe_ipv4_recv_tcp(si, skb, dev, len, iph, ihl, sync_on_find, l2_info);
- 	}
- 
--	if (IPPROTO_ESP == protocol) {
--		return sfe_ipv4_recv_esp(si, skb, dev, len, iph, ihl, sync_on_find, tun_outer);
--	}
--
- 	if (IPPROTO_ICMP == protocol) {
- 		return sfe_ipv4_recv_icmp(si, skb, dev, len, iph, ihl);
- 	}
-@@ -1279,8 +1270,7 @@ int sfe_ipv4_create_rule(struct sfe_ipv4_rule_create_msg *msg)
- 		}
- 	}
- 
--	if (((IPPROTO_GRE == tuple->protocol) || (IPPROTO_ESP == tuple->protocol)) &&
--					!sfe_ipv4_is_local_ip(si, original_cm->match_dest_ip)) {
-+	if ((IPPROTO_GRE == tuple->protocol) && !sfe_ipv4_is_local_ip(si, original_cm->match_dest_ip)) {
- 		original_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_PASSTHROUGH;
- 	}
- 
-@@ -1460,8 +1450,7 @@ int sfe_ipv4_create_rule(struct sfe_ipv4_rule_create_msg *msg)
- 		reply_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_FAST_XMIT_DEV_ADMISSION;
- 	}
- 
--	if (((IPPROTO_GRE == tuple->protocol) || (IPPROTO_ESP == tuple->protocol)) &&
--					!sfe_ipv4_is_local_ip(si, reply_cm->match_dest_ip)) {
-+	if ((IPPROTO_GRE == tuple->protocol) && !sfe_ipv4_is_local_ip(si, reply_cm->match_dest_ip)) {
- 		reply_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_PASSTHROUGH;
- 	}
- 
-@@ -1571,22 +1560,6 @@ int sfe_ipv4_create_rule(struct sfe_ipv4_rule_create_msg *msg)
- 	}
- #endif
- 
--	if ((IPPROTO_ESP == tuple->protocol) && !(reply_cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_PASSTHROUGH)) {
--		rcu_read_lock();
--		reply_cm->proto = rcu_dereference(inet_protos[IPPROTO_ESP]);
--		rcu_read_unlock();
--
--		if (unlikely(!reply_cm->proto)) {
--			kfree(reply_cm);
--			kfree(original_cm);
--			kfree(c);
--			dev_put(src_dev);
--			dev_put(dest_dev);
--			DEBUG_WARN("sfe: ESP proto handler is not registered\n");
--			return -EPERM;
--		}
--	}
--
- #ifdef CONFIG_NF_FLOW_COOKIE
- 	reply_cm->flow_cookie = 0;
- #endif
-diff --git a/qca-nss-sfe/sfe_ipv4.h b/qca-nss-sfe/sfe_ipv4.h
-index fcbc09b..4e8169b 100644
---- a/qca-nss-sfe/sfe_ipv4.h
-+++ b/qca-nss-sfe/sfe_ipv4.h
-@@ -288,10 +288,6 @@ enum sfe_ipv4_exception_events {
- 	SFE_IPV4_EXCEPTION_EVENT_GRE_IP_OPTIONS_OR_INITIAL_FRAGMENT,
- 	SFE_IPV4_EXCEPTION_EVENT_GRE_SMALL_TTL,
- 	SFE_IPV4_EXCEPTION_EVENT_GRE_NEEDS_FRAGMENTATION,
--	SFE_IPV4_EXCEPTION_EVENT_ESP_NO_CONNECTION,
--	SFE_IPV4_EXCEPTION_EVENT_ESP_IP_OPTIONS_OR_INITIAL_FRAGMENT,
--	SFE_IPV4_EXCEPTION_EVENT_ESP_NEEDS_FRAGMENTATION,
--	SFE_IPV4_EXCEPTION_EVENT_ESP_SMALL_TTL,
- 	SFE_IPV4_EXCEPTION_EVENT_LAST
- };
- 
-diff --git a/qca-nss-sfe/sfe_ipv4_esp.c b/qca-nss-sfe/sfe_ipv4_esp.c
-deleted file mode 100644
-index f0b4941..0000000
---- a/qca-nss-sfe/sfe_ipv4_esp.c
-+++ /dev/null
-@@ -1,295 +0,0 @@
--/*
-- * sfe_ipv4_esp.c
-- *	Shortcut forwarding engine - IPv4 ESP implementation
-- *
-- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
-- *
-- * Permission to use, copy, modify, and/or distribute this software for any
-- * purpose with or without fee is hereby granted, provided that the above
-- * copyright notice and this permission notice appear in all copies.
-- *
-- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-- */
--
--#include <linux/skbuff.h>
--#include <net/protocol.h>
--#include <net/ip.h>
--#include <linux/etherdevice.h>
--#include <linux/lockdep.h>
--
--#include "sfe_debug.h"
--#include "sfe_api.h"
--#include "sfe.h"
--#include "sfe_flow_cookie.h"
--#include "sfe_ipv4.h"
--#include "sfe_ipv4_esp.h"
--
--/*
-- * sfe_ipv4_recv_esp()
-- *	Handle ESP packet receives and forwarding
-- */
--int sfe_ipv4_recv_esp(struct sfe_ipv4 *si, struct sk_buff *skb, struct net_device *dev,
--				unsigned int len, struct iphdr *iph, unsigned int ihl,
--				bool sync_on_find, bool tun_outer)
--{
--	struct sfe_ipv4_connection_match *cm;
--	struct net_device *xmit_dev;
--	struct net_protocol *ipprot;
--	netdev_features_t features;
--	bool passthrough;
--	bool bridge_flow;
--	bool fast_xmit;
--	bool hw_csum;
--	__be32 src_ip;
--	__be32 dest_ip;
--	bool ret;
--	u8 ttl;
--
--	/*
--	 * Read the IP address from the iphdr, and set the src/dst ports to 0.
--	 */
--	src_ip = iph->saddr;
--	dest_ip = iph->daddr;
--	rcu_read_lock();
--
--	/*
--	 * Look for a connection match.
--	 */
--#ifdef CONFIG_NF_FLOW_COOKIE
--	cm = si->sfe_flow_cookie_table[skb->flow_cookie & SFE_FLOW_COOKIE_MASK].match;
--	if (unlikely(!cm)) {
--		cm = sfe_ipv4_find_ipv4_connection_match_rcu(si, dev, IPPROTO_ESP, src_ip, 0, dest_ip, 0);
--	}
--#else
--	cm = sfe_ipv4_find_connection_match_rcu(si, dev, IPPROTO_ESP, src_ip, 0, dest_ip, 0);
--#endif
--	if (unlikely(!cm)) {
--		rcu_read_unlock();
--		sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_ESP_NO_CONNECTION);
--		DEBUG_TRACE("no connection found for esp packet\n");
--		return 0;
--	}
--
--	/*
--	 * Source interface validate.
--	 */
--	if (unlikely((cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK) && (cm->match_dev != dev))) {
--		struct sfe_ipv4_connection *c = cm->connection;
--		int ret;
--
--		spin_lock_bh(&si->lock);
--		ret = sfe_ipv4_remove_connection(si, c);
--		spin_unlock_bh(&si->lock);
--
--		if (ret) {
--			sfe_ipv4_flush_connection(si, c, SFE_SYNC_REASON_FLUSH);
--		}
--		rcu_read_unlock();
--		sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_INVALID_SRC_IFACE);
--		DEBUG_TRACE("flush on wrong source interface check failure\n");
--		return 0;
--	}
--
--	passthrough = cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_PASSTHROUGH;
--	bridge_flow = !!(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_BRIDGE_FLOW);
--
--	/*
--	 * If our packet has been marked as "sync on find" we can't actually
--	 * forward it in the fast path, but now that we've found an associated
--	 * connection we need sync its status before exception it to slow path unless
--	 * it is passthrough (packets not directed to DUT) packet.
--	 * TODO: revisit to ensure that pass through traffic is not bypassing firewall for fragmented cases
--	 */
--	if (unlikely(sync_on_find) && !passthrough) {
--		sfe_ipv4_sync_status(si, cm->connection, SFE_SYNC_REASON_STATS);
--		rcu_read_unlock();
--		sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_ESP_IP_OPTIONS_OR_INITIAL_FRAGMENT);
--		DEBUG_TRACE("%px: sfe: sync on find\n", cm);
--		return 0;
--	}
--
--	/*
--	 * Check if skb was cloned. If it was, unshare it.
--	 */
--	if (unlikely(skb_cloned(skb))) {
--		DEBUG_TRACE("%px: skb is a cloned skb\n", skb);
--		skb = skb_unshare(skb, GFP_ATOMIC);
--		if (!skb) {
--			DEBUG_WARN("Failed to unshare the cloned skb\n");
--			rcu_read_unlock();
--			return 0;
--		}
--
--		/*
--		 * Update the iphdr pointer with the unshared skb's data area.
--		 */
--		iph = (struct iphdr *)skb->data;
--	}
--
--	/*
--	 * Enable HW csum if rx checksum is verified and xmit interface is CSUM offload capable.
--	 */
--	hw_csum = !!(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_CSUM_OFFLOAD) && (skb->ip_summed == CHECKSUM_UNNECESSARY);
--
--	/*
--	 * proto decap packet.
--	 *	Invoke the inet_protocol handler for delivery of the packet.
--	 */
--	ipprot = rcu_dereference(cm->proto);
--	if (likely(ipprot)) {
--		skb_reset_network_header(skb);
--		skb_pull(skb, ihl);
--		skb_reset_transport_header(skb);
--		xmit_dev = cm->xmit_dev;
--		skb->dev = xmit_dev;
--
--		ret = ipprot->handler(skb);
--		if (ret) {
--			rcu_read_unlock();
--			this_cpu_inc(si->stats_pcpu->packets_not_forwarded64);
--			DEBUG_TRACE("ESP handler returned error %u\n", ret);
--			return 0;
--		}
--
--		/*
--		 * Update traffic stats.
--		 */
--		atomic_inc(&cm->rx_packet_count);
--		atomic_add(len, &cm->rx_byte_count);
--
--		rcu_read_unlock();
--		this_cpu_inc(si->stats_pcpu->packets_forwarded64);
--		return 1;
--	}
--
--	/*
--	 * esp passthrough / ip local out scenarios.
--	 */
--	/*
--	 * If our packet is larger than the MTU of the transmit interface then
--	 * we can't forward it easily.
--	 */
--	if (unlikely(len > cm->xmit_dev_mtu)) {
--		sfe_ipv4_sync_status(si, cm->connection, SFE_SYNC_REASON_STATS);
--		rcu_read_unlock();
--		sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_ESP_NEEDS_FRAGMENTATION);
--		DEBUG_TRACE("%px: sfe: larger than MTU\n", cm);
--		return 0;
--	}
--
--	/*
--	 * need to ensure that TTL is >=2.
--	 */
--	ttl = iph->ttl;
--	if (!bridge_flow && (ttl < 2) && passthrough) {
--		sfe_ipv4_sync_status(si, cm->connection, SFE_SYNC_REASON_STATS);
--		rcu_read_unlock();
--
--		DEBUG_TRACE("%px: sfe: TTL too low\n", skb);
--		sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_ESP_SMALL_TTL);
--		return 0;
--	}
--
--	/*
--	 * decrement TTL by 1.
--	 */
--	iph->ttl = (ttl - (u8)(!bridge_flow && !tun_outer));
--
--	/*
--	 * Update DSCP
--	 */
--	if (unlikely(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_DSCP_REMARK)) {
--		iph->tos = (iph->tos & SFE_IPV4_DSCP_MASK) | cm->dscp;
--	}
--
--	/*
--	 * Replace the IP checksum.
--	 */
--	if (likely(hw_csum)) {
--		skb->ip_summed = CHECKSUM_PARTIAL;
--	} else {
--		iph->check = sfe_ipv4_gen_ip_csum(iph);
--	}
--
--	/*
--	 * Update traffic stats.
--	 */
--	atomic_inc(&cm->rx_packet_count);
--	atomic_add(len, &cm->rx_byte_count);
--
--	xmit_dev = cm->xmit_dev;
--	skb->dev = xmit_dev;
--
--	/*
--	 * write the layer - 2 header.
--	 */
--	if (likely(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_WRITE_L2_HDR)) {
--		if (unlikely(!(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_WRITE_FAST_ETH_HDR))) {
--			dev_hard_header(skb, xmit_dev, ETH_P_IP, cm->xmit_dest_mac, cm->xmit_src_mac, len);
--		} else {
--			/*
--			 * For the simple case we write this really fast.
--			 */
--			struct ethhdr *eth = (struct ethhdr *)__skb_push(skb, ETH_HLEN);
--			eth->h_proto = htons(ETH_P_IP);
--			ether_addr_copy((u8 *)eth->h_dest, (u8 *)cm->xmit_dest_mac);
--			ether_addr_copy((u8 *)eth->h_source, (u8 *)cm->xmit_src_mac);
--		}
--	}
--
--	/*
--	 * Update priority of skb
--	 */
--	if (unlikely(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_PRIORITY_REMARK)) {
--		skb->priority = cm->priority;
--	}
--
--	/*
--	 * Mark outgoing packet.
--	 */
--	if (unlikely(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_MARK)) {
--		skb->mark = cm->mark;
--	}
--
--	/*
--	 * For the first packets, check if it could got fast xmit.
--	 */
--	if (unlikely(!(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_FAST_XMIT_FLOW_CHECKED)
--				&& (cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_FAST_XMIT_DEV_ADMISSION))){
--		cm->features = netif_skb_features(skb);
--		if (likely(sfe_fast_xmit_check(skb, cm->features))) {
--			cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_FAST_XMIT;
--		}
--		cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_FAST_XMIT_FLOW_CHECKED;
--	}
--
--	features = cm->features;
--	fast_xmit = !!(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_FAST_XMIT);
--
--	rcu_read_unlock();
--	this_cpu_inc(si->stats_pcpu->packets_forwarded64);
--	prefetch(skb_shinfo(skb));
--
--	/*
--	 * We do per packet condition check before we could fast xmit the
--	 * packet.
--	 */
--	if (likely(fast_xmit && dev_fast_xmit(skb, xmit_dev, features))) {
--		this_cpu_inc(si->stats_pcpu->packets_fast_xmited64);
--		return 1;
--	}
--
--	/*
--	 * Mark that this packet has been fast forwarded.
--	 */
--	skb->fast_forwarded = 1;
--
--	dev_queue_xmit(skb);
--	return 1;
--}
-diff --git a/qca-nss-sfe/sfe_ipv4_esp.h b/qca-nss-sfe/sfe_ipv4_esp.h
-deleted file mode 100644
-index f889605..0000000
---- a/qca-nss-sfe/sfe_ipv4_esp.h
-+++ /dev/null
-@@ -1,21 +0,0 @@
--/*
-- * sfe_ipv4_esp.h
-- *	Shortcut forwarding engine - IPv4 ESP header file
-- *
-- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
-- *
-- * Permission to use, copy, modify, and/or distribute this software for any
-- * purpose with or without fee is hereby granted, provided that the above
-- * copyright notice and this permission notice appear in all copies.
-- *
-- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-- */
--
--int sfe_ipv4_recv_esp(struct sfe_ipv4 *si, struct sk_buff *skb, struct net_device *dev, unsigned int len,
--			struct iphdr *iph, unsigned int ihl, bool sync_on_find, bool tun_outer);
-diff --git a/qca-nss-sfe/sfe_ipv6.c b/qca-nss-sfe/sfe_ipv6.c
-index 9b9539f..372d9fb 100644
---- a/qca-nss-sfe/sfe_ipv6.c
-+++ b/qca-nss-sfe/sfe_ipv6.c
-@@ -46,7 +46,6 @@
- #include "sfe_pppoe.h"
- #include "sfe_ipv6_tunipip6.h"
- #include "sfe_ipv6_gre.h"
--#include "sfe_ipv6_esp.h"
- 
- #define sfe_ipv6_addr_copy(src, dest) memcpy((void *)(dest), (void *)(src), 16)
- 
-@@ -105,10 +104,6 @@ static char *sfe_ipv6_exception_events_string[SFE_IPV6_EXCEPTION_EVENT_LAST] = {
- 	"GRE_IP_OPTIONS_OR_INITIAL_FRAGMENT",
- 	"GRE_SMALL_TTL",
- 	"GRE_NEEDS_FRAGMENTATION",
--	"ESP_NO_CONNECTION",
--	"ESP_IP_OPTIONS_OR_INITIAL_FRAGMENT",
--	"ESP_NEEDS_FRAGMENTATION",
--	"ESP_SMALL_TTL"
- };
- 
- static struct sfe_ipv6 __si6;
-@@ -868,10 +863,6 @@ int sfe_ipv6_recv(struct net_device *dev, struct sk_buff *skb, struct sfe_l2_inf
- 		return sfe_ipv6_recv_tcp(si, skb, dev, len, iph, ihl, sync_on_find, l2_info);
- 	}
- 
--	if (IPPROTO_ESP == next_hdr) {
--		return sfe_ipv6_recv_esp(si, skb, dev, len, iph, ihl, sync_on_find, tun_outer);
--	}
--
- 	if (IPPROTO_ICMPV6 == next_hdr) {
- 		return sfe_ipv6_recv_icmp(si, skb, dev, len, iph, ihl);
- 	}
-@@ -1547,7 +1538,7 @@ int sfe_ipv6_create_rule(struct sfe_ipv6_rule_create_msg *msg)
- #ifdef SFE_GRE_TUN_ENABLE
- 	if ((IPPROTO_GRE == tuple->protocol) && !(reply_cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_PASSTHROUGH)) {
- 		rcu_read_lock();
--		reply_cm->proto = rcu_dereference(inet6_protos[IPPROTO_GRE]);
-+		reply_cm->proto = rcu_dereference(inet6_protos[tuple->protocol]);
- 		rcu_read_unlock();
- 
- 		if (unlikely(!reply_cm->proto)) {
-@@ -1564,24 +1555,6 @@ int sfe_ipv6_create_rule(struct sfe_ipv6_rule_create_msg *msg)
- 	}
- #endif
- 
--	if ((IPPROTO_ESP == tuple->protocol) && !(reply_cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_PASSTHROUGH)) {
--		rcu_read_lock();
--		reply_cm->proto = rcu_dereference(inet6_protos[IPPROTO_ESP]);
--		rcu_read_unlock();
--
--		if (unlikely(!reply_cm->proto)) {
--			this_cpu_inc(si->stats_pcpu->connection_create_failures64);
--			spin_unlock_bh(&si->lock);
--			kfree(reply_cm);
--			kfree(original_cm);
--			kfree(c);
--			dev_put(src_dev);
--			dev_put(dest_dev);
--			DEBUG_WARN("sfe: ESP proto handler is not registered\n");
--			return -EPERM;
--		}
--	}
--
- 	/*
- 	 * Decapsulation path have proto set.
- 	 * This is used to differentiate de/encap, and call protocol specific handler.
-diff --git a/qca-nss-sfe/sfe_ipv6.h b/qca-nss-sfe/sfe_ipv6.h
-index f9a33f8..9c78f1c 100644
---- a/qca-nss-sfe/sfe_ipv6.h
-+++ b/qca-nss-sfe/sfe_ipv6.h
-@@ -307,10 +307,6 @@ enum sfe_ipv6_exception_events {
- 	SFE_IPV6_EXCEPTION_EVENT_GRE_IP_OPTIONS_OR_INITIAL_FRAGMENT,
- 	SFE_IPV6_EXCEPTION_EVENT_GRE_SMALL_TTL,
- 	SFE_IPV6_EXCEPTION_EVENT_GRE_NEEDS_FRAGMENTATION,
--	SFE_IPV6_EXCEPTION_EVENT_ESP_NO_CONNECTION,
--	SFE_IPV6_EXCEPTION_EVENT_ESP_IP_OPTIONS_OR_INITIAL_FRAGMENT,
--	SFE_IPV6_EXCEPTION_EVENT_ESP_NEEDS_FRAGMENTATION,
--	SFE_IPV6_EXCEPTION_EVENT_ESP_SMALL_TTL,
- 	SFE_IPV6_EXCEPTION_EVENT_LAST
- };
- 
-diff --git a/qca-nss-sfe/sfe_ipv6_esp.c b/qca-nss-sfe/sfe_ipv6_esp.c
-deleted file mode 100644
-index 7a152e8..0000000
---- a/qca-nss-sfe/sfe_ipv6_esp.c
-+++ /dev/null
-@@ -1,275 +0,0 @@
--/*
-- * sfe_ipv6_esp.c
-- *	Shortcut forwarding engine - IPv6 ESP implementation
-- *
-- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
-- *
-- * Permission to use, copy, modify, and/or distribute this software for any
-- * purpose with or without fee is hereby granted, provided that the above
-- * copyright notice and this permission notice appear in all copies.
-- *
-- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-- */
--
--#include <linux/skbuff.h>
--#include <net/protocol.h>
--#include <net/ip6_checksum.h>
--#include <linux/etherdevice.h>
--#include <linux/version.h>
--
--#include "sfe_debug.h"
--#include "sfe_api.h"
--#include "sfe.h"
--#include "sfe_flow_cookie.h"
--#include "sfe_ipv6.h"
--#include "sfe_ipv6_esp.h"
--
--/*
-- * sfe_ipv6_recv_esp()
-- *	Handle ESP packet receives and forwarding
-- */
--int sfe_ipv6_recv_esp(struct sfe_ipv6 *si, struct sk_buff *skb, struct net_device *dev,
--				unsigned int len, struct ipv6hdr *iph, unsigned int ihl,
--				bool sync_on_find, bool tun_outer)
--{
--	struct sfe_ipv6_connection_match *cm;
--	struct sfe_ipv6_addr *src_ip;
--	struct sfe_ipv6_addr *dest_ip;
--	struct net_device *xmit_dev;
--	struct inet6_protocol *ipprot;
--	netdev_features_t features;
--	bool bridge_flow;
--	bool passthrough;
--	bool fast_xmit;
--	bool ret;
--
--	/*
--	 * Read the IP address from the iphdr, and set the src/dst ports to 0.
--	 */
--	src_ip = (struct sfe_ipv6_addr *)iph->saddr.s6_addr32;
--	dest_ip = (struct sfe_ipv6_addr *)iph->daddr.s6_addr32;
--	rcu_read_lock();
--
--	/*
--	 * Look for a connection match.
--	 */
--#ifdef CONFIG_NF_FLOW_COOKIE
--	cm = si->sfe_flow_cookie_table[skb->flow_cookie & SFE_FLOW_COOKIE_MASK].match;
--	if (unlikely(!cm)) {
--		cm = sfe_ipv6_find_connection_match_rcu(si, dev, IPPROTO_ESP, src_ip, 0, dest_ip, 0);
--	}
--#else
--	cm = sfe_ipv6_find_connection_match_rcu(si, dev, IPPROTO_ESP, src_ip, 0, dest_ip, 0);
--#endif
--	if (unlikely(!cm)) {
--		rcu_read_unlock();
--		sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_ESP_NO_CONNECTION);
--
--		DEBUG_TRACE("no connection found for esp packet\n");
--		return 0;
--	}
--
--	/*
--	 * Source interface validate.
--	 */
--	if (unlikely((cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK) && (cm->match_dev != dev))) {
--		struct sfe_ipv6_connection *c = cm->connection;
--		int ret;
--
--		spin_lock_bh(&si->lock);
--		ret = sfe_ipv6_remove_connection(si, c);
--		spin_unlock_bh(&si->lock);
--
--		if (ret) {
--			sfe_ipv6_flush_connection(si, c, SFE_SYNC_REASON_FLUSH);
--		}
--		rcu_read_unlock();
--		sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_INVALID_SRC_IFACE);
--		DEBUG_TRACE("flush on wrong source interface check failure\n");
--		return 0;
--	}
--
--	passthrough = cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_PASSTHROUGH;
--	bridge_flow = !!(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_BRIDGE_FLOW);
--
--	/*
--	 * If our packet has beern marked as "sync on find" we can't actually
--	 * forward it in the fast path, but now that we've found an associated
--	 * connection we need sync its status before exception it to slow path. unless
--	 * it is passthrough packet.
--	 * TODO: revisit to ensure that pass through traffic is not bypassing firewall for fragmented cases
--	 */
--	if (unlikely(sync_on_find) && !passthrough) {
--		sfe_ipv6_sync_status(si, cm->connection, SFE_SYNC_REASON_STATS);
--		rcu_read_unlock();
--
--		sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_ESP_IP_OPTIONS_OR_INITIAL_FRAGMENT);
--		DEBUG_TRACE("Sync on find\n");
--		return 0;
--	}
--
--	/*
--	 * Check if skb was cloned. If it was, unshare it.
--	 */
--	if (unlikely(skb_cloned(skb))) {
--		DEBUG_TRACE("%px: skb is a cloned skb\n", skb);
--		skb = skb_unshare(skb, GFP_ATOMIC);
--		if (!skb) {
--			DEBUG_WARN("Failed to unshare the cloned skb\n");
--			rcu_read_unlock();
--			return 0;
--		}
--
--		/*
--		 * Update the iphdr pointer with the unshared skb's data area.
--		 */
--		iph = (struct ipv6hdr *)skb->data;
--	}
--
--	/*
--	 * proto decap packet.
--	 *	Invoke the inet_protocol handler for delivery of the packet.
--	 */
--	ipprot = rcu_dereference(cm->proto);
--	if (likely(ipprot)) {
--		skb_reset_network_header(skb);
--		skb_pull(skb, ihl);
--		skb_reset_transport_header(skb);
--		xmit_dev = cm->xmit_dev;
--		skb->dev = xmit_dev;
--
--		ret = ipprot->handler(skb);
--		if (ret) {
--			rcu_read_unlock();
--			this_cpu_inc(si->stats_pcpu->packets_not_forwarded64);
--			DEBUG_TRACE("ESP handler returned error %u\n", ret);
--			return 0;
--		}
--
--		rcu_read_unlock();
--		this_cpu_inc(si->stats_pcpu->packets_forwarded64);
--		return 1;
--	}
--
--	/*
--	 * esp passthrough / ip local out scenarios
--	 */
--	/*
--	 * If our packet is larger than the MTU of the transmit interface then
--	 * we can't forward it easily.
--	 */
--	if (unlikely(len > cm->xmit_dev_mtu)) {
--		sfe_ipv6_sync_status(si, cm->connection, SFE_SYNC_REASON_STATS);
--		rcu_read_unlock();
--
--		sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_ESP_NEEDS_FRAGMENTATION);
--		DEBUG_TRACE("Larger than MTU\n");
--		return 0;
--	}
--
--	/*
--	 * need to ensure that TTL is >=2.
--	 */
--	if (!bridge_flow && (iph->hop_limit < 2) && passthrough) {
--		sfe_ipv6_sync_status(si, cm->connection, SFE_SYNC_REASON_STATS);
--		rcu_read_unlock();
--
--		sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_ESP_SMALL_TTL);
--		DEBUG_TRACE("hop_limit too low\n");
--		return 0;
--	}
--
--	/*
--	 * decrement TTL by 1.
--	 */
--	iph->hop_limit = iph->hop_limit - (u8)(!bridge_flow && !tun_outer);
--
--	/*
--	 * Update DSCP
--	 */
--	if (unlikely(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_DSCP_REMARK)) {
--		sfe_ipv6_change_dsfield(iph, cm->dscp);
--	}
--
--	/*
--	 * Update traffic stats.
--	 */
--	atomic_inc(&cm->rx_packet_count);
--	atomic_add(len, &cm->rx_byte_count);
--
--	xmit_dev = cm->xmit_dev;
--	skb->dev = xmit_dev;
--
--	/*
--	 * write the layer - 2 header.
--	 */
--	if (likely(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_WRITE_L2_HDR)) {
--		if (unlikely(!(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_WRITE_FAST_ETH_HDR))) {
--			dev_hard_header(skb, xmit_dev, ETH_P_IPV6, cm->xmit_dest_mac, cm->xmit_src_mac, len);
--		} else {
--			/*
--			 * For the simple case we write this really fast.
--			 */
--			struct ethhdr *eth = (struct ethhdr *)__skb_push(skb, ETH_HLEN);
--			eth->h_proto = htons(ETH_P_IPV6);
--			ether_addr_copy((u8 *)eth->h_dest, (u8 *)cm->xmit_dest_mac);
--			ether_addr_copy((u8 *)eth->h_source, (u8 *)cm->xmit_src_mac);
--		}
--	}
--
--	/*
--	 * Update priority of skb.
--	 */
--	if (unlikely(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_PRIORITY_REMARK)) {
--		skb->priority = cm->priority;
--	}
--
--	/*
--	 * Mark outgoing packet.
--	 */
--	if (unlikely(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_MARK)) {
--		skb->mark = cm->mark;
--	}
--
--	/*
--	 * For the first packets, check if it could got fast xmit.
--	 */
--	if (unlikely(!(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_FAST_XMIT_FLOW_CHECKED)
--				&& (cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_FAST_XMIT_DEV_ADMISSION))){
--		cm->features = netif_skb_features(skb);
--		if (likely(sfe_fast_xmit_check(skb, cm->features))) {
--			cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_FAST_XMIT;
--		}
--		cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_FAST_XMIT_FLOW_CHECKED;
--	}
--
--	features = cm->features;
--	fast_xmit = !!(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_FAST_XMIT);
--
--	rcu_read_unlock();
--	this_cpu_inc(si->stats_pcpu->packets_forwarded64);
--	prefetch(skb_shinfo(skb));
--
--	/*
--	 * We do per packet condition check before we could fast xmit the
--	 * packet.
--	 */
--	if (likely(fast_xmit && dev_fast_xmit(skb, xmit_dev, features))) {
--		this_cpu_inc(si->stats_pcpu->packets_fast_xmited64);
--		return 1;
--	}
--
--	/*
--	 * Mark that this packet has been fast forwarded.
--	 */
--	skb->fast_forwarded = 1;
--
--	dev_queue_xmit(skb);
--	return 1;
--}
-diff --git a/qca-nss-sfe/sfe_ipv6_esp.h b/qca-nss-sfe/sfe_ipv6_esp.h
-deleted file mode 100644
-index 2870670..0000000
---- a/qca-nss-sfe/sfe_ipv6_esp.h
-+++ /dev/null
-@@ -1,21 +0,0 @@
--/*
-- * sfe_ipv6_esp.h
-- *	Shortcut forwarding engine - IPv6 ESP header file
-- *
-- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
-- *
-- * Permission to use, copy, modify, and/or distribute this software for any
-- * purpose with or without fee is hereby granted, provided that the above
-- * copyright notice and this permission notice appear in all copies.
-- *
-- * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
-- * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
-- * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
-- * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
-- * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
-- * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
-- * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
-- */
--
--int sfe_ipv6_recv_esp(struct sfe_ipv6 *si, struct sk_buff *skb, struct net_device *dev, unsigned int len,
--			struct ipv6hdr *iph, unsigned int ihl, bool sync_on_find, bool tun_outer);
--- 
-2.42.0.869.gea05f2083d-goog
-
diff --git a/build_scripts/build_all.sh b/build_scripts/build_all.sh
index 6ef4530..32edba4 100755
--- a/build_scripts/build_all.sh
+++ b/build_scripts/build_all.sh
@@ -4,7 +4,13 @@
 
 top_dir=$(readlink -e $(dirname $0)/../../)
 
-declare -A PRODUCT_LIST=([sirocco]="sirocco-p1 sirocco-b1 sirocco-b3 sirocco-b4" [brezza]="brezza-p0 brezza-p1 brezza-b1 brezza-b3 brezza-b4")
+# Plan to support different brezza boards:
+# brezza-p1: 2022/08
+# brezza-p2: 2022/10
+# brezza-b1: 2023/01
+# brezza-b3: 2023/04
+# brezza-b4: 2023/05
+declare -A PRODUCT_LIST=([sirocco]="sirocco-p1 sirocco-b1 sirocco-b3 sirocco-b4" [brezza]="brezza-p0 brezza-p1 brezza-p2 brezza-b1 brezza-b3 brezza-b4")
 
 function Help() {
   echo "Usage: $0  <eureka_workspace> <product/board> [optional build number] [other options]"
diff --git a/qca-nss-dp/nss_dp_main.c b/qca-nss-dp/nss_dp_main.c
index f9607de..dea8c05 100644
--- a/qca-nss-dp/nss_dp_main.c
+++ b/qca-nss-dp/nss_dp_main.c
@@ -809,6 +809,7 @@
 	uint8_t *maddr;
 	struct nss_dp_dev *dp_priv;
 	struct resource memres_devtree = {0};
+	uint32_t is_wan_interface;
 
 	dp_priv = netdev_priv(netdev);
 
@@ -817,13 +818,17 @@
 		return -EFAULT;
 	}
 
+	if (of_property_read_u32(np, "is_wan", &is_wan_interface)) {
+		pr_err("%s: error reading is_wan\n", np->name);
+		return -EFAULT;
+	}
+
 	/*
-	 * change interface 1 name to wan0
-	 * change interface 2 name to lan0
+	 * Name WAN/LAN interfaces as per hint from DTSI
 	 */
-	if (dp_priv->macid == 1) {
+	if (is_wan_interface) {
 		strcpy(netdev->name, "wan0");
-	} else if (dp_priv->macid == 2) {
+	} else {
 		strcpy(netdev->name, "lan0");
 	}
 
diff --git a/qca-nss-ecm/Makefile b/qca-nss-ecm/Makefile
index debcc10..0fb8871 100644
--- a/qca-nss-ecm/Makefile
+++ b/qca-nss-ecm/Makefile
@@ -210,9 +210,7 @@
 # #############################################################################
 # Define ECM_XFRM_ENABLE=y in order to enable
 # #############################################################################
-ifeq ($(ECM_FRONT_END_NSS_ENABLE), y)
 ccflags-$(ECM_XFRM_ENABLE) += -DECM_XFRM_ENABLE
-endif
 
 # #############################################################################
 # Define ECM_INTERFACE_OVS_BRIDGE_ENABLE=y in order to enable support for OVS
@@ -235,7 +233,7 @@
 ccflags-$(ECM_INTERFACE_IPSEC_ENABLE) += -DECM_INTERFACE_IPSEC_ENABLE
 
 ECM_INTERFACE_IPSEC_GLUE_LAYER_SUPPORT_ENABLE=n
-ifeq ($(SoC),$(filter $(SoC), ipq807x ipq807x_64 ipq60xx ipq60xx_64 ipq50xx ipq50xx_64))
+ifeq ($(SoC),$(filter $(SoC), ipq807x ipq807x_64 ipq60xx ipq60xx_64 ipq50xx ipq50xx_64 ipq95xx_32 ipq95xx))
 ECM_INTERFACE_IPSEC_GLUE_LAYER_SUPPORT_ENABLE=$(ECM_INTERFACE_IPSEC_ENABLE)
 ccflags-$(ECM_INTERFACE_IPSEC_GLUE_LAYER_SUPPORT_ENABLE) += -DECM_INTERFACE_IPSEC_GLUE_LAYER_SUPPORT_ENABLE
 endif
diff --git a/qca-nss-ecm/build.sh b/qca-nss-ecm/build.sh
index f1528d0..47857ef 100755
--- a/qca-nss-ecm/build.sh
+++ b/qca-nss-ecm/build.sh
@@ -23,7 +23,7 @@
 soc_type=ipq50xx
 extra_cflags="-I${qca_sfe_path}/exports"
 
-build_flags="ECM_CLASSIFIER_HYFI_ENABLE=n ECM_MULTICAST_ENABLE=n ECM_INTERFACE_IPSEC_ENABLE=n ECM_INTERFACE_PPTP_ENABLE=n ECM_INTERFACE_L2TPV2_ENABLE=n ECM_INTERFACE_GRE_TAP_ENABLE=n ECM_INTERFACE_GRE_TUN_ENABLE=n ECM_INTERFACE_SIT_ENABLE=n ECM_INTERFACE_TUNIPIP6_ENABLE=n ECM_INTERFACE_RAWIP_ENABLE=n ECM_INTERFACE_BOND_ENABLE=n ECM_XFRM_ENABLE=n ECM_FRONT_END_SFE_ENABLE=y ECM_NON_PORTED_SUPPORT_ENABLE=n ECM_INTERFACE_MAP_T_ENABLE=n ECM_INTERFACE_VXLAN_ENABLE=n ECM_INTERFACE_OVS_BRIDGE_ENABLE=n ECM_CLASSIFIER_OVS_ENABLE=n ECM_CLASSIFIER_DSCP_IGS=n ECM_IPV6_ENABLE=y ECM_FRONT_END_NSS_ENABLE=n EXAMPLES_BUILD_OVS=n"
+build_flags="ECM_CLASSIFIER_HYFI_ENABLE=n ECM_MULTICAST_ENABLE=n ECM_INTERFACE_IPSEC_GLUE_LAYER_SUPPORT_ENABLE=y ECM_INTERFACE_IPSEC_ENABLE=y ECM_INTERFACE_PPTP_ENABLE=n ECM_INTERFACE_L2TPV2_ENABLE=n ECM_INTERFACE_GRE_TAP_ENABLE=n ECM_INTERFACE_GRE_TUN_ENABLE=n ECM_INTERFACE_SIT_ENABLE=n ECM_INTERFACE_TUNIPIP6_ENABLE=n ECM_INTERFACE_RAWIP_ENABLE=n ECM_INTERFACE_BOND_ENABLE=n ECM_XFRM_ENABLE=y ECM_FRONT_END_SFE_ENABLE=y ECM_NON_PORTED_SUPPORT_ENABLE=y ECM_INTERFACE_MAP_T_ENABLE=n ECM_INTERFACE_VXLAN_ENABLE=n ECM_INTERFACE_OVS_BRIDGE_ENABLE=n ECM_CLASSIFIER_OVS_ENABLE=n ECM_CLASSIFIER_DSCP_IGS=n ECM_IPV6_ENABLE=y ECM_FRONT_END_NSS_ENABLE=n EXAMPLES_BUILD_OVS=n"
 
 ##################################################
 # Build Kernel Module
diff --git a/qca-nss-ecm/ecm_interface.c b/qca-nss-ecm/ecm_interface.c
index 7058f15..c86349d 100644
--- a/qca-nss-ecm/ecm_interface.c
+++ b/qca-nss-ecm/ecm_interface.c
@@ -1,7 +1,7 @@
 /*
  **************************************************************************
  * Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for
  * any purpose with or without fee is hereby granted, provided that the
@@ -33,6 +33,7 @@
 #include <net/ip_fib.h>
 #include <net/ip.h>
 #include <net/tcp.h>
+#include <net/xfrm.h>
 #include <net/addrconf.h>
 #include <asm/unaligned.h>
 #include <asm/uaccess.h>	/* for put_user */
@@ -254,16 +255,48 @@
 struct net_device *ecm_interface_get_and_hold_ipsec_tun_netdev(struct net_device *dev, struct sk_buff *skb, int32_t *interface_type)
 {
 	struct net_device *ipsec_dev = NULL;
+	struct xfrm_state *x = NULL;
+	uint8_t ip_ver = ip_hdr(skb)->version;
 #ifdef ECM_INTERFACE_IPSEC_GLUE_LAYER_SUPPORT_ENABLE
 	spin_lock_bh(&ecm_interface_lock);
-	if (!ecm_interface_ipsec_cb.tunnel_get_and_hold) {
+
+	/*
+	 * Check if callback is registered with us or not.
+	 */
+	if (ecm_interface_ipsec_cb.tunnel_get_and_hold) {
+		ipsec_dev = ecm_interface_ipsec_cb.tunnel_get_and_hold(dev, skb, interface_type);
 		spin_unlock_bh(&ecm_interface_lock);
-		DEBUG_WARN("IPSec glue module is not loaded yet\n");
-		return NULL;
+		return ipsec_dev;
 	}
 
-	ipsec_dev = ecm_interface_ipsec_cb.tunnel_get_and_hold(dev, skb, interface_type);
+	/*
+	 * If the packet is processed via the XFRM stack we can directly parse it.
+	 */
+	*interface_type = 0;
+	BUG_ON((ip_ver != IPVERSION) && (ip_ver != 6));
+	x = skb_dst(skb)->xfrm;
+
+	/*
+	 * XFRM dst is valid for encapsulation.
+	 */
+	if (x && (x->xflags & XFRM_STATE_OFFLOAD_NSS)) {
+		ipsec_dev = x->offload_dev;
+		BUG_ON(!ipsec_dev);
+		dev_hold(ipsec_dev);
+		spin_unlock_bh(&ecm_interface_lock);
+		return ipsec_dev;
+	}
+
+	/*
+	 * IPsec netdevice interface is set for the decapsulated packet.
+	 */
+	ipsec_dev = dev_get_by_index(&init_net, skb->skb_iif);
+	if (ipsec_dev && (ipsec_dev->type != ECM_ARPHRD_IPSEC_TUNNEL_TYPE)) {
+		ipsec_dev = NULL;
+	}
+
 	spin_unlock_bh(&ecm_interface_lock);
+
 #endif
 	return ipsec_dev;
 }
@@ -2579,7 +2612,6 @@
 	case ECM_DB_IFACE_TYPE_GRE_TUN:
 	case ECM_DB_IFACE_TYPE_GRE_TAP:
 	case ECM_DB_IFACE_TYPE_VXLAN:
-	case ECM_DB_IFACE_TYPE_IPSEC_TUNNEL:
 		if (src_dev) {
 			*mtu = src_dev->mtu;
 		} else {
@@ -3186,6 +3218,9 @@
 			dev_put(ipsec_dev);
 			return NULL;
 		}
+
+		DEBUG_TRACE("Obtained IPSec device is %s, and its interface number is %d\n", ipsec_dev->name,
+				ae_interface_num);
 		dev_put(ipsec_dev);
 
 		/*
@@ -3203,22 +3238,6 @@
 #endif
 		type_info.ipsec_tunnel.os_specific_ident = dev_interface_num;
 
-		/*
-		 * Override the MTU size in the decap direction in case of IPSec tunnel.
-		 * This will apply to IPsec->WAN rule.
-		 * TODO: Move this override to accelerate function.
-		 */
-		if (ip_hdr(skb)->version == IPVERSION) {
-			if ((ip_hdr(skb)->protocol == IPPROTO_ESP) ||
-			    ((ip_hdr(skb)->protocol == IPPROTO_UDP) &&
-			     (udp_hdr(skb)->dest == htons(4500)))) {
-				dev_mtu = ECM_DB_IFACE_MTU_MAX;
-			}
-		} else {
-			if (ipv6_hdr(skb)->nexthdr == IPPROTO_ESP) {
-				dev_mtu = ECM_DB_IFACE_MTU_MAX;
-			}
-		}
 
 		ii = ecm_interface_ipsec_tunnel_interface_establish(&type_info.ipsec_tunnel, dev_name, dev_interface_num, ae_interface_num, dev_mtu);
 		if (ii) {
diff --git a/qca-nss-ecm/examples/ecm_sfe_l2.c b/qca-nss-ecm/examples/ecm_sfe_l2.c
index dd64653..b69df5a 100644
--- a/qca-nss-ecm/examples/ecm_sfe_l2.c
+++ b/qca-nss-ecm/examples/ecm_sfe_l2.c
@@ -155,7 +155,6 @@
 						   tuple->dest_addr, tuple->dest_port, tuple->protocol);
 		if (!rule) {
 			spin_unlock_bh(&ecm_sfe_l2_policy_rules_lock);
-			pr_warn("No rule with this tuple\n");
 			goto done;
 		}
 		direction = rule->direction;
@@ -175,7 +174,6 @@
 
 		if (!rule) {
 			spin_unlock_bh(&ecm_sfe_l2_policy_rules_lock);
-			pr_warn("No rule with this tuple\n");
 			goto done;
 		}
 		direction = rule->direction;
@@ -296,7 +294,7 @@
 	spin_unlock_bh(&ecm_sfe_l2_policy_rules_lock);
 	kfree(rule);
 
-	pr_info("rule deleted\n");
+	pr_debug("rule deleted\n");
 	return true;
 }
 
@@ -312,7 +310,7 @@
 	rule = ecm_sfe_l2_policy_rule_find(ip_ver, sip_addr, sport, dip_addr, dport, protocol);
 	if (rule) {
 		if (rule->direction != direction) {
-			pr_info("Update direction of the rule from %d to %d\n", rule->direction, direction);
+			pr_debug("Update direction of the rule from %d to %d\n", rule->direction, direction);
 			rule->direction = direction;
 		}
 		spin_unlock_bh(&ecm_sfe_l2_policy_rules_lock);
@@ -341,7 +339,7 @@
 	list_add(&rule->list, &ecm_sfe_l2_policy_rules);
 	spin_unlock_bh(&ecm_sfe_l2_policy_rules_lock);
 
-	pr_info("rule added\n");
+	pr_debug("rule added\n");
 	return true;
 }
 
@@ -388,13 +386,13 @@
 	 */
 	fields = cmd_buf;
 	while ((token = strsep(&fields, " "))) {
-		pr_info("\ntoken: %s\n", token);
+		pr_debug("\ntoken: %s\n", token);
 
 		option = strsep(&token, "=");
 		value = token;
 
-		pr_info("\t\toption: %s\n", option);
-		pr_info("\t\tvalue: %s\n", value);
+		pr_debug("\t\toption: %s\n", option);
+		pr_debug("\t\tvalue: %s\n", value);
 
 		if (!strcmp(option, "cmd")) {
 			if (sscanf(value, "%d", &cmd)) {
diff --git a/qca-nss-ecm/frontends/ecm_front_end_common.c b/qca-nss-ecm/frontends/ecm_front_end_common.c
index f6b5300..4bf6c46 100644
--- a/qca-nss-ecm/frontends/ecm_front_end_common.c
+++ b/qca-nss-ecm/frontends/ecm_front_end_common.c
@@ -33,6 +33,7 @@
 #include <net/ipv6.h>
 #include <net/addrconf.h>
 #include <net/gre.h>
+#include <net/xfrm.h>
 
 /*
  * Debug output levels
@@ -105,6 +106,92 @@
 	return !!(ecm_fe_feature_list[type] & feature);
 }
 
+/*
+ * ecm_front_end_is_xfrm_flow()
+ *	Check if the flow is an xfrm flow.
+ */
+static bool ecm_front_end_is_xfrm_flow(struct sk_buff *skb, struct ecm_tracker_ip_header *ip_hdr)
+{
+#ifdef CONFIG_XFRM
+	struct dst_entry *dst;
+	struct net *net;
+
+	net = dev_net(skb->dev);
+	if (likely(!net->xfrm.policy_count[XFRM_POLICY_OUT])) {
+		return false;
+	}
+
+	/*
+	 * Packet seen after output transformation. We use the IPCB(skb) to check
+	 * for this condition. No custom code should mangle the IPCB: skb->cb area,
+	 * while the packet is traversing through the INET layer.
+	 */
+	if (ip_hdr->is_v4) {
+		if ((IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED)) {
+			DEBUG_TRACE("%px: Packet has undergone xfrm transformation\n", skb);
+			return true;
+		}
+	} else if (IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) {
+		DEBUG_TRACE("%px: Packet has undergone xfrm transformation\n", skb);
+		return true;
+	}
+
+	if (ip_hdr->protocol == IPPROTO_ESP) {
+		DEBUG_TRACE("%px: ESP Passthrough packet\n", skb);
+		return false;
+	}
+
+	/*
+	 * skb's sp is set for decapsulated packet
+	 */
+	if (secpath_exists(skb)) {
+		DEBUG_TRACE("%px: Packet has undergone xfrm decapsulation (%d)\n", skb, ip_hdr->protocol);
+		return true;
+	}
+
+	/*
+	 * dst->xfrm is valid for lan to wan plain packet
+	 */
+	dst = skb_dst(skb);
+	if (dst && dst->xfrm) {
+		DEBUG_TRACE("%px: Plain text packet destined for xfrm(%d)\n", skb, ip_hdr->protocol);
+		return true;
+	}
+#endif
+
+	return false;
+}
+
+/*
+ * ecm_front_end_feature_check()
+ *	Check some specific features for front end acceleration
+ */
+bool ecm_front_end_feature_check(struct sk_buff *skb, struct ecm_tracker_ip_header *ip_hdr)
+{
+	if (ecm_front_end_is_xfrm_flow(skb, ip_hdr)) {
+#ifdef ECM_XFRM_ENABLE
+		struct net_device *ipsec_dev;
+		int32_t interface_type;
+
+		/*
+		 * Check if the transformation for this flow
+		 * is done by AE. If yes, then try to accelerate.
+		 */
+		ipsec_dev = ecm_interface_get_and_hold_ipsec_tun_netdev(NULL, skb, &interface_type);
+		if (!ipsec_dev) {
+			DEBUG_TRACE("%px xfrm flow not managed by NSS; skip it\n", skb);
+			return false;
+		}
+		dev_put(ipsec_dev);
+#else
+		DEBUG_TRACE("%px xfrm flow, but accel is disabled; skip it\n", skb);
+		return false;
+#endif
+	}
+
+	return true;
+}
+
 #ifdef ECM_INTERFACE_BOND_ENABLE
 /*
  * ecm_front_end_bond_notifier_stop()
diff --git a/qca-nss-ecm/frontends/include/ecm_front_end_common.h b/qca-nss-ecm/frontends/include/ecm_front_end_common.h
index 1c0a6d0..056f668 100644
--- a/qca-nss-ecm/frontends/include/ecm_front_end_common.h
+++ b/qca-nss-ecm/frontends/include/ecm_front_end_common.h
@@ -313,5 +313,6 @@
 void ecm_front_end_common_sysctl_unregister(void);
 int ecm_sfe_sysctl_tbl_init(void);
 void ecm_sfe_sysctl_tbl_exit(void);
+bool ecm_front_end_feature_check(struct sk_buff *skb, struct ecm_tracker_ip_header *ip_hdr);
 
 #endif  /* __ECM_FRONT_END_COMMON_H */
diff --git a/qca-nss-ecm/frontends/nss/ecm_nss_common.c b/qca-nss-ecm/frontends/nss/ecm_nss_common.c
index cd3dd5b..7d5b3c0 100644
--- a/qca-nss-ecm/frontends/nss/ecm_nss_common.c
+++ b/qca-nss-ecm/frontends/nss/ecm_nss_common.c
@@ -1,5 +1,5 @@
 /*
- * Copyright (c) 2021 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2021-2022 Qualcomm Innovation Center, Inc. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -70,6 +70,29 @@
 #endif
 
 /*
+ * ecm_nss_feature_check()
+ *	Check some specific features for NSS acceleration
+ */
+bool ecm_nss_feature_check(struct sk_buff *skb, struct ecm_tracker_ip_header *ip_hdr)
+{
+	/*
+	 * If the DSCP value of the packet maps to the NOT accel action type,
+	 * do not accelerate the packet and let it go through the
+	 * slow path.
+	 */
+	if (ip_hdr->protocol == IPPROTO_UDP) {
+		uint8_t action = ip_hdr->is_v4 ?
+			nss_ipv4_dscp_action_get(ip_hdr->dscp) : nss_ipv6_dscp_action_get(ip_hdr->dscp);
+		if (action == NSS_IPV4_DSCP_MAP_ACTION_DONT_ACCEL || action == NSS_IPV6_DSCP_MAP_ACTION_DONT_ACCEL) {
+			DEBUG_TRACE("%px: dscp: %d maps to action not accel type, skip acceleration\n", skb, ip_hdr->dscp);
+			return false;
+		}
+	}
+
+	return ecm_front_end_feature_check(skb, ip_hdr);
+}
+
+/*
  * ecm_nss_ipv4_is_conn_limit_reached()
  *	Connection limit is reached or not ?
  */
diff --git a/qca-nss-ecm/frontends/nss/ecm_nss_common.h b/qca-nss-ecm/frontends/nss/ecm_nss_common.h
index 3f3dda2..aeef71f 100644
--- a/qca-nss-ecm/frontends/nss/ecm_nss_common.h
+++ b/qca-nss-ecm/frontends/nss/ecm_nss_common.h
@@ -310,61 +310,6 @@
 }
 #endif
 
-/*
- * ecm_nss_common_is_xfrm_flow()
- *	Check if the flow is an xfrm flow.
- */
-static inline bool ecm_nss_common_is_xfrm_flow(struct sk_buff *skb, struct ecm_tracker_ip_header *ip_hdr)
-{
-#ifdef CONFIG_XFRM
-	struct dst_entry *dst;
-	struct net *net;
-
-	net = dev_net(skb->dev);
-	if (likely(!net->xfrm.policy_count[XFRM_POLICY_OUT])) {
-		return false;
-	}
-
-	/*
-	 * Packet seen after output transformation. We use the IPCB(skb) to check
-	 * for this condition. No custom code should mangle the IPCB: skb->cb area,
-	 * while the packet is traversing through the INET layer.
-	 */
-	if (ip_hdr->is_v4) {
-		if ((IPCB(skb)->flags & IPSKB_XFRM_TRANSFORMED)) {
-			DEBUG_TRACE("%px: Packet has undergone xfrm transformation\n", skb);
-			return true;
-		}
-	} else if (IP6CB(skb)->flags & IP6SKB_XFRM_TRANSFORMED) {
-		DEBUG_TRACE("%px: Packet has undergone xfrm transformation\n", skb);
-		return true;
-	}
-
-	if (ip_hdr->protocol == IPPROTO_ESP) {
-		DEBUG_TRACE("%px: ESP Passthrough packet\n", skb);
-		return false;
-	}
-
-	/*
-	 * skb's sp is set for decapsulated packet
-	 */
-	if (secpath_exists(skb)) {
-		DEBUG_TRACE("%px: Packet has undergone xfrm decapsulation((%d)\n", skb, ip_hdr->protocol);
-		return true;
-	}
-
-	/*
-	 * dst->xfrm is valid for lan to wan plain packet
-	 */
-	dst = skb_dst(skb);
-	if (dst && dst->xfrm) {
-		DEBUG_TRACE("%px: Plain text packet destined for xfrm(%d)\n", skb, ip_hdr->protocol);
-		return true;
-	}
-#endif
-	return false;
-}
-
 #ifdef ECM_CLASSIFIER_PCC_ENABLE
 /*
  * ecm_nss_common_fill_mirror_info()
@@ -421,50 +366,7 @@
 
 bool ecm_nss_ipv6_is_conn_limit_reached(void);
 bool ecm_nss_ipv4_is_conn_limit_reached(void);
-
-/*
- * ecm_nss_feature_check()
- *	Check some specific features for NSS acceleration
- */
-static inline bool ecm_nss_feature_check(struct sk_buff *skb, struct ecm_tracker_ip_header *ip_hdr)
-{
-	/*
-	 * If the DSCP value of the packet maps to the NOT accel action type,
-	 * do not accelerate the packet and let it go through the
-	 * slow path.
-	 */
-	if (ip_hdr->protocol == IPPROTO_UDP) {
-		uint8_t action = ip_hdr->is_v4 ?
-			nss_ipv4_dscp_action_get(ip_hdr->dscp) : nss_ipv6_dscp_action_get(ip_hdr->dscp);
-		if (action == NSS_IPV4_DSCP_MAP_ACTION_DONT_ACCEL || action == NSS_IPV6_DSCP_MAP_ACTION_DONT_ACCEL) {
-			DEBUG_TRACE("%px: dscp: %d maps to action not accel type, skip acceleration\n", skb, ip_hdr->dscp);
-			return false;
-		}
-	}
-
-	if (ecm_nss_common_is_xfrm_flow(skb, ip_hdr)) {
-#ifdef ECM_XFRM_ENABLE
-		struct net_device *ipsec_dev;
-		int32_t interface_type;
-
-		/*
-		* Check if the transformation for this flow
-		 * is done by NSS. If yes, then only try to accelerate.
-		 */
-		ipsec_dev = ecm_interface_get_and_hold_ipsec_tun_netdev(NULL, skb, &interface_type);
-		if (!ipsec_dev) {
-			DEBUG_TRACE("%px xfrm flow not managed by NSS; skip it\n", skb);
-			return false;
-		}
-		dev_put(ipsec_dev);
-#else
-		DEBUG_TRACE("%px xfrm flow, but accel is disabled; skip it\n", skb);
-		return false;
-#endif
-	}
-
-	return true;
-}
+bool ecm_nss_feature_check(struct sk_buff *skb, struct ecm_tracker_ip_header *ip_hdr);
 
 /*
  * ecm_nss_common_dummy_get_stats_bitmap()
diff --git a/qca-nss-ecm/frontends/sfe/ecm_sfe_common.c b/qca-nss-ecm/frontends/sfe/ecm_sfe_common.c
index 6cb1473..4ceb42d 100644
--- a/qca-nss-ecm/frontends/sfe/ecm_sfe_common.c
+++ b/qca-nss-ecm/frontends/sfe/ecm_sfe_common.c
@@ -78,6 +78,19 @@
 }
 
 /*
+ * ecm_sfe_feature_check()
+ *	Check some specific features for SFE acceleration
+ */
+bool ecm_sfe_feature_check(struct sk_buff *skb, struct ecm_tracker_ip_header *ip_hdr, bool is_routed)
+{
+	if (!is_routed && !sfe_is_l2_feature_enabled()) {
+		return false;
+	}
+
+	return ecm_front_end_feature_check(skb, ip_hdr);
+}
+
+/*
  * ecm_sfe_common_set_stats_bitmap()
  *	Set bit map
  */
@@ -168,6 +181,14 @@
 
 	BUG_ON(!rcu_read_lock_bh_held());
 
+#ifdef ECM_INTERFACE_IPSEC_ENABLE
+	if (dev->type == ECM_ARPHRD_IPSEC_TUNNEL_TYPE) {
+		DEBUG_INFO("Fast xmit is not enabled for ipsec device[%s]\n", dev->name);
+		dev_put(dev);
+		return false;
+	}
+#endif
+
 	/*
 	 * It assume that the qdisc attribute won't change after traffic
 	 * running, if the qdisc changed, we need flush all of the rule.
diff --git a/qca-nss-ecm/frontends/sfe/ecm_sfe_common.h b/qca-nss-ecm/frontends/sfe/ecm_sfe_common.h
index 54625f3..3d33ed3 100644
--- a/qca-nss-ecm/frontends/sfe/ecm_sfe_common.h
+++ b/qca-nss-ecm/frontends/sfe/ecm_sfe_common.h
@@ -68,31 +68,11 @@
 };
 
 /*
- * ecm_sfe_feature_check()
- *	Check some specific features for SFE acceleration
- */
-static inline bool ecm_sfe_feature_check(struct sk_buff *skb, struct ecm_tracker_ip_header *ip_hdr, bool is_routed)
-{
-	if (!is_routed && !sfe_is_l2_feature_enabled()) {
-		return false;
-	}
-
-	return true;
-}
-
-/*
  * ecm_sfe_common_get_interface_number_by_dev()
  *	Returns the acceleration engine interface number based on the net_device object.
  */
 static inline int32_t ecm_sfe_common_get_interface_number_by_dev(struct net_device *dev)
 {
-	/*
-	 * sfe_interface_num for all IPsec tunnels will always be the one specific to acceleration engine.
-	 */
-	if (dev->type == ECM_ARPHRD_IPSEC_TUNNEL_TYPE) {
-		return SFE_SPECIAL_INTERFACE_IPSEC;
-	}
-
 	return dev->ifindex;
 }
 
@@ -158,3 +138,4 @@
 void ecm_sfe_common_tuple_set(struct ecm_front_end_connection_instance *feci,
 			      int32_t from_iface_id, int32_t to_iface_id,
 			      struct ecm_sfe_common_tuple *tuple);
+bool ecm_sfe_feature_check(struct sk_buff *skb, struct ecm_tracker_ip_header *ip_hdr, bool is_routed);
diff --git a/qca-nss-ecm/frontends/sfe/ecm_sfe_ported_ipv4.c b/qca-nss-ecm/frontends/sfe/ecm_sfe_ported_ipv4.c
index c3f5d73..fed74b3 100644
--- a/qca-nss-ecm/frontends/sfe/ecm_sfe_ported_ipv4.c
+++ b/qca-nss-ecm/frontends/sfe/ecm_sfe_ported_ipv4.c
@@ -778,7 +778,6 @@
 				DEBUG_TRACE("%px: IPSEC - additional unsupported\n", npci);
 				break;
 			}
-			nircm->conn_rule.flow_interface_num = SFE_SPECIAL_INTERFACE_IPSEC;
 #else
 			rule_invalid = true;
 			DEBUG_TRACE("%px: IPSEC - unsupported\n", npci);
@@ -1066,7 +1065,6 @@
 				DEBUG_TRACE("%px: IPSEC - additional unsupported\n", npci);
 				break;
 			}
-			nircm->conn_rule.return_interface_num = SFE_SPECIAL_INTERFACE_IPSEC;
 #else
 			rule_invalid = true;
 			DEBUG_TRACE("%px: IPSEC - unsupported\n", npci);
diff --git a/qca-nss-ecm/frontends/sfe/ecm_sfe_ported_ipv6.c b/qca-nss-ecm/frontends/sfe/ecm_sfe_ported_ipv6.c
index a01ee76..c433ea5 100644
--- a/qca-nss-ecm/frontends/sfe/ecm_sfe_ported_ipv6.c
+++ b/qca-nss-ecm/frontends/sfe/ecm_sfe_ported_ipv6.c
@@ -784,7 +784,6 @@
 				DEBUG_TRACE("%px: IPSEC - additional unsupported\n", npci);
 				break;
 			}
-			nircm->conn_rule.flow_interface_num = SFE_SPECIAL_INTERFACE_IPSEC;
 #else
 			rule_invalid = true;
 			DEBUG_TRACE("%px: IPSEC - unsupported\n", npci);
@@ -1072,7 +1071,6 @@
 				DEBUG_TRACE("%px: IPSEC - additional unsupported\n", npci);
 				break;
 			}
-			nircm->conn_rule.return_interface_num = SFE_SPECIAL_INTERFACE_IPSEC;
 #else
 			rule_invalid = true;
 			DEBUG_TRACE("%px: IPSEC - unsupported\n", npci);
diff --git a/qca-nss-sfe/Makefile b/qca-nss-sfe/Makefile
index 74137e0..6ac42d3 100644
--- a/qca-nss-sfe/Makefile
+++ b/qca-nss-sfe/Makefile
@@ -5,8 +5,8 @@
 KERNELVERSION := $(word 1, $(subst ., ,$(KERNELVERSION))).$(word 2, $(subst ., ,$(KERNELVERSION)))
 
 SFE_BASE_OBJS := sfe.o sfe_init.o
-SFE_IPV4_OBJS := sfe_ipv4.o sfe_ipv4_udp.o sfe_ipv4_tcp.o sfe_ipv4_icmp.o
-SFE_IPV6_OBJS := sfe_ipv6.o sfe_ipv6_udp.o sfe_ipv6_tcp.o sfe_ipv6_icmp.o sfe_ipv6_tunipip6.o
+SFE_IPV4_OBJS := sfe_ipv4.o sfe_ipv4_udp.o sfe_ipv4_tcp.o sfe_ipv4_icmp.o sfe_ipv4_esp.o
+SFE_IPV6_OBJS := sfe_ipv6.o sfe_ipv6_udp.o sfe_ipv6_tcp.o sfe_ipv6_icmp.o sfe_ipv6_tunipip6.o sfe_ipv6_esp.o
 SFE_PPPOE_OBJS := sfe_pppoe.o
 
 
@@ -18,6 +18,7 @@
 
 
 obj-m += qca-nss-sfe.o
+obj-m += tunnels/ipsec/
 
 #
 # Base files
diff --git a/qca-nss-sfe/build.sh b/qca-nss-sfe/build.sh
index 09493c5..91e3805 100755
--- a/qca-nss-sfe/build.sh
+++ b/qca-nss-sfe/build.sh
@@ -21,7 +21,7 @@
 kernel_path=$(readlink -e ${sdk_top_dir}/../kernel)
 soc_type=ipq50xx
 extra_cflags="-DSFE_SUPPORT_IPV6"
-build_flags="SFE_SUPPORT_IPV6=y"
+build_flags="SFE_SUPPORT_IPV6=y SFE_PROCESS_LOCAL_OUT=y"
 
 ##################################################
 # Build Kernel Module
@@ -60,6 +60,7 @@
     local module_target_dir="$(GetModulePath ${eureka_src_path} ${product})"
     mkdir -p ${module_target_dir}
     cp -f ${MODULE_NAME}.ko ${module_target_dir}/.
+    cp -f tunnels/ipsec/qca-nss-sfe-xfrm.ko ${module_target_dir}/.
 }
 
 function Usage() {
diff --git a/qca-nss-sfe/sfe.c b/qca-nss-sfe/sfe.c
index 70f1c2c..8bc387c 100644
--- a/qca-nss-sfe/sfe.c
+++ b/qca-nss-sfe/sfe.c
@@ -41,6 +41,7 @@
 #define sfe_ipv6_addr_copy(src, dest) memcpy((void *)(dest), (void *)(src), 16)
 #define sfe_ipv4_stopped(CTX) (rcu_dereference((CTX)->ipv4_stats_sync_cb) == NULL)
 #define sfe_ipv6_stopped(CTX) (rcu_dereference((CTX)->ipv6_stats_sync_cb) == NULL)
+#define SFE_IPSEC_TUNNEL_TYPE 31
 
 typedef enum sfe_exception {
 	SFE_EXCEPTION_IPV4_MSG_UNKNOW,
@@ -248,6 +249,10 @@
 	}
 #endif
 
+	if (dev->type == SFE_IPSEC_TUNNEL_TYPE) {
+		return true;
+	}
+
 	return false;
 }
 
@@ -277,6 +282,10 @@
 		return false;
 	}
 
+	if (dev->type == SFE_IPSEC_TUNNEL_TYPE) {
+		return false;
+	}
+
 	return true;
 }
 
@@ -671,6 +680,9 @@
 	case IPPROTO_GRE:
 		break;
 
+	case IPPROTO_ESP:
+		break;
+
 	default:
 		ret = SFE_CMN_RESPONSE_EMSG;
 		sfe_incr_exceptions(SFE_EXCEPTION_PROTOCOL_NOT_SUPPORT);
@@ -1039,6 +1051,9 @@
 	case IPPROTO_GRE:
 		break;
 
+	case IPPROTO_ESP:
+		break;
+
 	default:
 		ret = SFE_CMN_RESPONSE_EMSG;
 		sfe_incr_exceptions(SFE_EXCEPTION_PROTOCOL_NOT_SUPPORT);
diff --git a/qca-nss-sfe/sfe_ipv4.c b/qca-nss-sfe/sfe_ipv4.c
index 3650022..48eec72 100644
--- a/qca-nss-sfe/sfe_ipv4.c
+++ b/qca-nss-sfe/sfe_ipv4.c
@@ -45,6 +45,7 @@
 #include "sfe_ipv4_icmp.h"
 #include "sfe_pppoe.h"
 #include "sfe_ipv4_gre.h"
+#include "sfe_ipv4_esp.h"
 
 static char *sfe_ipv4_exception_events_string[SFE_IPV4_EXCEPTION_EVENT_LAST] = {
 	"UDP_HEADER_INCOMPLETE",
@@ -94,7 +95,11 @@
 	"GRE_NO_CONNECTION",
 	"GRE_IP_OPTIONS_OR_INITIAL_FRAGMENT",
 	"GRE_SMALL_TTL",
-	"GRE_NEEDS_FRAGMENTATION"
+	"GRE_NEEDS_FRAGMENTATION",
+	"ESP_NO_CONNECTION",
+	"ESP_IP_OPTIONS_OR_INITIAL_FRAGMENT",
+	"ESP_NEEDS_FRAGMENTATION",
+	"ESP_SMALL_TTL"
 };
 
 static struct sfe_ipv4 __si;
@@ -871,6 +876,10 @@
 		return sfe_ipv4_recv_tcp(si, skb, dev, len, iph, ihl, sync_on_find, l2_info);
 	}
 
+	if (IPPROTO_ESP == protocol) {
+		return sfe_ipv4_recv_esp(si, skb, dev, len, iph, ihl, sync_on_find, tun_outer);
+	}
+
 	if (IPPROTO_ICMP == protocol) {
 		return sfe_ipv4_recv_icmp(si, skb, dev, len, iph, ihl);
 	}
@@ -1270,7 +1279,8 @@
 		}
 	}
 
-	if ((IPPROTO_GRE == tuple->protocol) && !sfe_ipv4_is_local_ip(si, original_cm->match_dest_ip)) {
+	if (((IPPROTO_GRE == tuple->protocol) || (IPPROTO_ESP == tuple->protocol)) &&
+					!sfe_ipv4_is_local_ip(si, original_cm->match_dest_ip)) {
 		original_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_PASSTHROUGH;
 	}
 
@@ -1450,7 +1460,8 @@
 		reply_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_FAST_XMIT_DEV_ADMISSION;
 	}
 
-	if ((IPPROTO_GRE == tuple->protocol) && !sfe_ipv4_is_local_ip(si, reply_cm->match_dest_ip)) {
+	if (((IPPROTO_GRE == tuple->protocol) || (IPPROTO_ESP == tuple->protocol)) &&
+					!sfe_ipv4_is_local_ip(si, reply_cm->match_dest_ip)) {
 		reply_cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_PASSTHROUGH;
 	}
 
@@ -1469,11 +1480,11 @@
 	 * which will be released in sfe_ipv4_free_connection_rcu()
 	 */
 #if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 13, 0))
-	sk = __udp4_lib_lookup(net, reply_cm->match_dest_ip, reply_cm->match_dest_port,
-			reply_cm->xlate_src_ip, reply_cm->xlate_src_port, src_if_idx, &udp_table);
+	sk = __udp4_lib_lookup(net, reply_cm->xlate_src_ip, reply_cm->xlate_src_port,
+			reply_cm->match_dest_ip, reply_cm->match_dest_port, src_if_idx, &udp_table);
 #else
-	sk = __udp4_lib_lookup(net, reply_cm->match_dest_ip, reply_cm->match_dest_port,
-			reply_cm->xlate_src_ip, reply_cm->xlate_src_port, src_if_idx, 0, &udp_table, NULL);
+	sk = __udp4_lib_lookup(net, reply_cm->xlate_src_ip, reply_cm->xlate_src_port,
+			reply_cm->match_dest_ip, reply_cm->match_dest_port, src_if_idx, 0, &udp_table, NULL);
 #endif
 
 	rcu_read_unlock();
@@ -1560,6 +1571,22 @@
 	}
 #endif
 
+	if ((IPPROTO_ESP == tuple->protocol) && !(reply_cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_PASSTHROUGH)) {
+		rcu_read_lock();
+		reply_cm->proto = rcu_dereference(inet_protos[IPPROTO_ESP]);
+		rcu_read_unlock();
+
+		if (unlikely(!reply_cm->proto)) {
+			kfree(reply_cm);
+			kfree(original_cm);
+			kfree(c);
+			dev_put(src_dev);
+			dev_put(dest_dev);
+			DEBUG_WARN("sfe: ESP proto handler is not registered\n");
+			return -EPERM;
+		}
+	}
+
 #ifdef CONFIG_NF_FLOW_COOKIE
 	reply_cm->flow_cookie = 0;
 #endif
diff --git a/qca-nss-sfe/sfe_ipv4.h b/qca-nss-sfe/sfe_ipv4.h
index 4e8169b..fcbc09b 100644
--- a/qca-nss-sfe/sfe_ipv4.h
+++ b/qca-nss-sfe/sfe_ipv4.h
@@ -288,6 +288,10 @@
 	SFE_IPV4_EXCEPTION_EVENT_GRE_IP_OPTIONS_OR_INITIAL_FRAGMENT,
 	SFE_IPV4_EXCEPTION_EVENT_GRE_SMALL_TTL,
 	SFE_IPV4_EXCEPTION_EVENT_GRE_NEEDS_FRAGMENTATION,
+	SFE_IPV4_EXCEPTION_EVENT_ESP_NO_CONNECTION,
+	SFE_IPV4_EXCEPTION_EVENT_ESP_IP_OPTIONS_OR_INITIAL_FRAGMENT,
+	SFE_IPV4_EXCEPTION_EVENT_ESP_NEEDS_FRAGMENTATION,
+	SFE_IPV4_EXCEPTION_EVENT_ESP_SMALL_TTL,
 	SFE_IPV4_EXCEPTION_EVENT_LAST
 };
 
diff --git a/qca-nss-sfe/sfe_ipv4_esp.c b/qca-nss-sfe/sfe_ipv4_esp.c
new file mode 100644
index 0000000..f8ebe51
--- /dev/null
+++ b/qca-nss-sfe/sfe_ipv4_esp.c
@@ -0,0 +1,309 @@
+/*
+ * sfe_ipv4_esp.c
+ *	Shortcut forwarding engine - IPv4 ESP implementation
+ *
+ * Copyright (c) 2022, 2024 Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/skbuff.h>
+#include <net/protocol.h>
+#include <net/ip.h>
+#include <linux/etherdevice.h>
+#include <linux/lockdep.h>
+
+#include "sfe_debug.h"
+#include "sfe_api.h"
+#include "sfe.h"
+#include "sfe_flow_cookie.h"
+#include "sfe_ipv4.h"
+#include "sfe_ipv4_esp.h"
+
+/*
+ * sfe_ipv4_recv_esp()
+ *	Handle ESP packet receives and forwarding
+ */
+int sfe_ipv4_recv_esp(struct sfe_ipv4 *si, struct sk_buff *skb, struct net_device *dev,
+				unsigned int len, struct iphdr *iph, unsigned int ihl,
+				bool sync_on_find, bool tun_outer)
+{
+	struct sfe_ipv4_connection_match *cm;
+	struct net_device *xmit_dev;
+	struct net_protocol *ipprot;
+	netdev_features_t features;
+	bool passthrough;
+	bool bridge_flow;
+	bool fast_xmit;
+	bool hw_csum;
+	__be32 src_ip;
+	__be32 dest_ip;
+	bool ret;
+	u8 ttl;
+
+	/*
+	 * Read the IP addresses from the iphdr; ESP has no L4 ports, so the lookup uses 0 for the src/dst ports.
+	 */
+	src_ip = iph->saddr;
+	dest_ip = iph->daddr;
+	rcu_read_lock();
+
+	/*
+	 * Look for a connection match.
+	 */
+#ifdef CONFIG_NF_FLOW_COOKIE
+	cm = si->sfe_flow_cookie_table[skb->flow_cookie & SFE_FLOW_COOKIE_MASK].match;
+	if (unlikely(!cm)) {
+		cm = sfe_ipv4_find_ipv4_connection_match_rcu(si, dev, IPPROTO_ESP, src_ip, 0, dest_ip, 0);
+	}
+#else
+	cm = sfe_ipv4_find_connection_match_rcu(si, dev, IPPROTO_ESP, src_ip, 0, dest_ip, 0);
+#endif
+	if (unlikely(!cm)) {
+		rcu_read_unlock();
+		sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_ESP_NO_CONNECTION);
+		DEBUG_TRACE("no connection found for esp packet\n");
+		return 0;
+	}
+
+	/*
+	 * Validate the source interface.
+	 */
+	if (unlikely((cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK) && (cm->match_dev != dev))) {
+		struct sfe_ipv4_connection *c = cm->connection;
+		int ret;
+
+		spin_lock_bh(&si->lock);
+		ret = sfe_ipv4_remove_connection(si, c);
+		spin_unlock_bh(&si->lock);
+
+		if (ret) {
+			sfe_ipv4_flush_connection(si, c, SFE_SYNC_REASON_FLUSH);
+		}
+		rcu_read_unlock();
+		sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_INVALID_SRC_IFACE);
+		DEBUG_TRACE("flush on wrong source interface check failure\n");
+		return 0;
+	}
+
+	passthrough = cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_PASSTHROUGH;
+	bridge_flow = !!(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_BRIDGE_FLOW);
+
+	/*
+	 * If our packet has been marked as "sync on find" we can't actually
+	 * forward it in the fast path, but now that we've found an associated
+	 * connection we need to sync its status before excepting it to the slow path,
+	 * unless it is a passthrough packet (i.e. not directed to the DUT).
+	 * TODO: revisit to ensure that pass through traffic is not bypassing firewall for fragmented cases
+	 */
+	if (unlikely(sync_on_find) && !passthrough) {
+		sfe_ipv4_sync_status(si, cm->connection, SFE_SYNC_REASON_STATS);
+		rcu_read_unlock();
+		sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_ESP_IP_OPTIONS_OR_INITIAL_FRAGMENT);
+		DEBUG_TRACE("%px: sfe: sync on find\n", cm);
+		return 0;
+	}
+
+	/*
+	 * Check if skb was cloned. If it was, unshare it.
+	 */
+	if (unlikely(skb_cloned(skb))) {
+		DEBUG_TRACE("%px: skb is a cloned skb\n", skb);
+		skb = skb_unshare(skb, GFP_ATOMIC);
+		if (!skb) {
+			DEBUG_WARN("Failed to unshare the cloned skb\n");
+			rcu_read_unlock();
+			return 0;
+		}
+
+		/*
+		 * Update the iphdr pointer with the unshared skb's data area.
+		 */
+		iph = (struct iphdr *)skb->data;
+	}
+
+	/*
+	 * Enable HW csum if rx checksum is verified and xmit interface is CSUM offload capable.
+	 */
+	hw_csum = !!(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_CSUM_OFFLOAD) && (skb->ip_summed == CHECKSUM_UNNECESSARY);
+
+	/*
+	 * proto decap packet.
+	 *	Invoke the inet_protocol handler for delivery of the packet.
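+	 *	For decap flows cm->proto was resolved from inet_protos[IPPROTO_ESP] at
+	 *	rule-creation time; it is NULL for passthrough flows.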
+	 */
+	ipprot = rcu_dereference(cm->proto);
+	if (likely(ipprot)) {
+		skb_reset_network_header(skb);
+		skb_pull(skb, ihl);
+		skb_reset_transport_header(skb);
+		xmit_dev = cm->xmit_dev;
+		skb->dev = xmit_dev;
+
+		ret = ipprot->handler(skb);
+		if (ret) {
+			rcu_read_unlock();
+			this_cpu_inc(si->stats_pcpu->packets_not_forwarded64);
+			DEBUG_TRACE("ESP handler returned error %u\n", ret);
+			return 0;
+		}
+
+		/*
+		 * Update traffic stats.
+		 */
+		atomic_inc(&cm->rx_packet_count);
+		atomic_add(len, &cm->rx_byte_count);
+
+		rcu_read_unlock();
+		this_cpu_inc(si->stats_pcpu->packets_forwarded64);
+		return 1;
+	}
+
+	/*
+	 * esp passthrough / ip local out scenarios.
+	 */
+	/*
+	 * If our packet is larger than the MTU of the transmit interface then
+	 * we can't forward it easily.
+	 */
+	if (unlikely(len > cm->xmit_dev_mtu)) {
+		sfe_ipv4_sync_status(si, cm->connection, SFE_SYNC_REASON_STATS);
+		rcu_read_unlock();
+		sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_ESP_NEEDS_FRAGMENTATION);
+		DEBUG_TRACE("%px: sfe: larger than MTU\n", cm);
+		return 0;
+	}
+
+	/*
+	 * Translate Source IP when NATT is involved.
+	 */
+	if (unlikely(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_XLATE_SRC)) {
+		iph->saddr = cm->xlate_src_ip;
+	}
+
+	/*
+	 * Translate destination IP when NATT is involved.
+	 */
+	if (unlikely(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_XLATE_DEST)) {
+		iph->daddr = cm->xlate_dest_ip;
+	}
+
+	/*
+	 * Need to ensure that the TTL is >= 2 for routed passthrough flows.
+	 */
+	ttl = iph->ttl;
+	if (!bridge_flow && (ttl < 2) && passthrough) {
+		sfe_ipv4_sync_status(si, cm->connection, SFE_SYNC_REASON_STATS);
+		rcu_read_unlock();
+
+		DEBUG_TRACE("%px: sfe: TTL too low\n", skb);
+		sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_ESP_SMALL_TTL);
+		return 0;
+	}
+
+	/*
+	 * Decrement the TTL by 1 for routed flows that are not a tunnel outer header.
+	 */
+	iph->ttl = (ttl - (u8)(!bridge_flow && !tun_outer));
+
+	/*
+	 * Update DSCP
+	 */
+	if (unlikely(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_DSCP_REMARK)) {
+		iph->tos = (iph->tos & SFE_IPV4_DSCP_MASK) | cm->dscp;
+	}
+
+	/*
+	 * Replace the IP checksum.
+	 */
+	if (likely(hw_csum)) {
+		skb->ip_summed = CHECKSUM_PARTIAL;
+	} else {
+		iph->check = sfe_ipv4_gen_ip_csum(iph);
+	}
+
+	/*
+	 * Update traffic stats.
+	 */
+	atomic_inc(&cm->rx_packet_count);
+	atomic_add(len, &cm->rx_byte_count);
+
+	xmit_dev = cm->xmit_dev;
+	skb->dev = xmit_dev;
+
+	/*
+	 * Write the Layer 2 header.
+	 */
+	if (likely(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_WRITE_L2_HDR)) {
+		if (unlikely(!(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_WRITE_FAST_ETH_HDR))) {
+			dev_hard_header(skb, xmit_dev, ETH_P_IP, cm->xmit_dest_mac, cm->xmit_src_mac, len);
+		} else {
+			/*
+			 * For the simple case we write this really fast.
+			 */
+			struct ethhdr *eth = (struct ethhdr *)__skb_push(skb, ETH_HLEN);
+			eth->h_proto = htons(ETH_P_IP);
+			ether_addr_copy((u8 *)eth->h_dest, (u8 *)cm->xmit_dest_mac);
+			ether_addr_copy((u8 *)eth->h_source, (u8 *)cm->xmit_src_mac);
+		}
+	}
+
+	/*
+	 * Update priority of skb
+	 */
+	if (unlikely(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_PRIORITY_REMARK)) {
+		skb->priority = cm->priority;
+	}
+
+	/*
+	 * Mark outgoing packet.
+	 */
+	if (unlikely(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_MARK)) {
+		skb->mark = cm->mark;
+	}
+
+	/*
+	 * For the first packet, check whether it qualifies for fast xmit.
+	 */
+	if (unlikely(!(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_FAST_XMIT_FLOW_CHECKED)
+				&& (cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_FAST_XMIT_DEV_ADMISSION))){
+		cm->features = netif_skb_features(skb);
+		if (likely(sfe_fast_xmit_check(skb, cm->features))) {
+			cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_FAST_XMIT;
+		}
+		cm->flags |= SFE_IPV4_CONNECTION_MATCH_FLAG_FAST_XMIT_FLOW_CHECKED;
+	}
+
+	features = cm->features;
+	fast_xmit = !!(cm->flags & SFE_IPV4_CONNECTION_MATCH_FLAG_FAST_XMIT);
+
+	rcu_read_unlock();
+	this_cpu_inc(si->stats_pcpu->packets_forwarded64);
+	prefetch(skb_shinfo(skb));
+
+	/*
+	 * Do a per-packet condition check before attempting to fast xmit the
+	 * packet.
+	 */
+	if (likely(fast_xmit && dev_fast_xmit(skb, xmit_dev, features))) {
+		this_cpu_inc(si->stats_pcpu->packets_fast_xmited64);
+		return 1;
+	}
+
+	/*
+	 * Mark that this packet has been fast forwarded.
+	 */
+	skb->fast_forwarded = 1;
+
+	dev_queue_xmit(skb);
+	return 1;
+}
diff --git a/qca-nss-sfe/sfe_ipv4_esp.h b/qca-nss-sfe/sfe_ipv4_esp.h
new file mode 100644
index 0000000..f889605
--- /dev/null
+++ b/qca-nss-sfe/sfe_ipv4_esp.h
@@ -0,0 +1,21 @@
+/*
+ * sfe_ipv4_esp.h
+ *	Shortcut forwarding engine - IPv4 ESP header file
+ *
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+int sfe_ipv4_recv_esp(struct sfe_ipv4 *si, struct sk_buff *skb, struct net_device *dev, unsigned int len,
+			struct iphdr *iph, unsigned int ihl, bool sync_on_find, bool tun_outer);
diff --git a/qca-nss-sfe/sfe_ipv4_udp.c b/qca-nss-sfe/sfe_ipv4_udp.c
index 4b15f7c..3fff836 100644
--- a/qca-nss-sfe/sfe_ipv4_udp.c
+++ b/qca-nss-sfe/sfe_ipv4_udp.c
@@ -3,7 +3,7 @@
  *	Shortcut forwarding engine - IPv4 UDP implementation
  *
  * Copyright (c) 2013-2016, 2019-2020, The Linux Foundation. All rights reserved.
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -47,6 +47,8 @@
 	struct sock *sk;
 	int ret;
 	int (*encap_rcv)(struct sock *sk, struct sk_buff *skb);
+	struct udphdr *uh;
+	unsigned short ulen;
 
 	/*
 	 * Call the decap handler for valid encap_rcv handler.
@@ -63,27 +65,44 @@
 #else
 	nf_reset_ct(skb);
 #endif
-
 	skb_pull(skb, ihl);
 	skb_reset_transport_header(skb);
+	sk = (struct sock *)up;
+
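+	/*
+	 * Validate the UDP length field and trim any padding beyond the datagram,
+	 * then re-read the header in case the trim reallocated the skb data.
+	 */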
+	uh = udp_hdr(skb);
+	ulen = ntohs(uh->len);
+	if (ulen < sizeof(*uh) || pskb_trim_rcsum(skb, ulen)) {
+		DEBUG_TRACE("%px: short packet", skb);
+		goto except;
+	}
+	uh = udp_hdr(skb);
 
 	/*
 	 * Verify checksum before giving to encap_rcv handler function.
 	 * TODO: The following approach is ignorant for UDPLITE for now.
 	 * Instead, consider calling Linux API to do checksum validation.
 	 */
-	if (unlikely(skb->ip_summed != CHECKSUM_UNNECESSARY) && unlikely(skb->ip_summed != CHECKSUM_COMPLETE)) {
-		skb->csum = inet_compute_pseudo(skb, IPPROTO_UDP);
-		if (unlikely(__skb_checksum_complete(skb))) {
-			DEBUG_ERROR("%px: sfe: Invalid udp checksum\n", skb);
-			kfree_skb(skb);
-			return -1;
+	if (unlikely(skb->ip_summed != CHECKSUM_UNNECESSARY && skb->ip_summed != CHECKSUM_COMPLETE) && uh->check) {
+
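+		/*
+		 * Reset the UDP control block coverage fields and use the stack's
+		 * helpers to validate (and possibly convert) the checksum, mirroring
+		 * the kernel's UDP receive path.
+		 */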
+		UDP_SKB_CB(skb)->partial_cov = 0;
+		UDP_SKB_CB(skb)->cscov = skb->len;
+
+		if (skb_checksum_init(skb, IPPROTO_UDP, inet_compute_pseudo)) {
+			DEBUG_TRACE("%px: checksum initialization failed", skb);
+			goto except;
+		}
+
+		if (inet_get_convert_csum(sk)) {
+			skb_checksum_try_convert(skb, IPPROTO_UDP, inet_compute_pseudo);
+		}
+
+		if (udp_lib_checksum_complete(skb)) {
+			DEBUG_TRACE("%px: udp checksum validation failed", skb);
+			goto except;
 		}
 		DEBUG_TRACE("%px: sfe: udp checksum verified in s/w correctly.\n", skb);
 	}
 
-	sk = (struct sock *)up;
-
 	/*
 	 * At this point, L4 checksum has already been verified and pkt is going
 	 * to Linux's tunnel decap-handler. Setting ip_summed field to CHECKSUM_NONE,
@@ -100,16 +119,19 @@
 	 */
 	ret = encap_rcv(sk, skb);
 	if (unlikely(ret)) {
-		/*
-		 * If encap_rcv fails, vxlan driver drops the packet.
-		 * No need to free the skb here.
-		 */
-
-		DEBUG_ERROR("%px: sfe: udp-decap API return error: %d\n", skb, ret);
-		return -1;
+		DEBUG_TRACE("%px: sfe: udp-decap API return error: %d\n", skb, ret);
+		goto except;
 	}
 
 	return 0;
+
+except:
+	/*
+	 * The packet can be restored with its original L2 information for an L2
+	 * flow, but the NATed IP addresses in the packet cannot be restored.
+	 */
+	skb_push(skb, ihl);
+	return 1;
 }
 
 /*
@@ -263,7 +285,7 @@
 	 * If our packet is larger than the MTU of the transmit interface then
 	 * we can't forward it easily.
 	 */
-	if (unlikely(len > cm->xmit_dev_mtu)) {
+	if (unlikely((len > cm->xmit_dev_mtu) && (!cm->up))) {
 		sfe_ipv4_sync_status(si, cm->connection, SFE_SYNC_REASON_STATS);
 		rcu_read_unlock();
 		sfe_ipv4_exception_stats_inc(si, SFE_IPV4_EXCEPTION_EVENT_UDP_NEEDS_FRAGMENTATION);
@@ -442,11 +464,7 @@
 		 * Also validates UDP checksum before calling decap handler.
 		 */
 		err = sfe_ipv4_udp_sk_deliver(skb, cm, ihl);
-		if (unlikely(err == -1)) {
-			rcu_read_unlock();
-			this_cpu_inc(si->stats_pcpu->packets_dropped64);
-			return 1;
-		} else if (unlikely(err == 1)) {
+		if (unlikely(err == 1)) {
 			rcu_read_unlock();
 			this_cpu_inc(si->stats_pcpu->packets_not_forwarded64);
 			return 0;
diff --git a/qca-nss-sfe/sfe_ipv6.c b/qca-nss-sfe/sfe_ipv6.c
index 9670c7e..9b9539f 100644
--- a/qca-nss-sfe/sfe_ipv6.c
+++ b/qca-nss-sfe/sfe_ipv6.c
@@ -3,7 +3,7 @@
  *	Shortcut forwarding engine - IPv6 support.
  *
  * Copyright (c) 2015-2016, 2019-2020, The Linux Foundation. All rights reserved.
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -46,6 +46,7 @@
 #include "sfe_pppoe.h"
 #include "sfe_ipv6_tunipip6.h"
 #include "sfe_ipv6_gre.h"
+#include "sfe_ipv6_esp.h"
 
 #define sfe_ipv6_addr_copy(src, dest) memcpy((void *)(dest), (void *)(src), 16)
 
@@ -103,7 +104,11 @@
 	"GRE_NO_CONNECTION",
 	"GRE_IP_OPTIONS_OR_INITIAL_FRAGMENT",
 	"GRE_SMALL_TTL",
-	"GRE_NEEDS_FRAGMENTATION"
+	"GRE_NEEDS_FRAGMENTATION",
+	"ESP_NO_CONNECTION",
+	"ESP_IP_OPTIONS_OR_INITIAL_FRAGMENT",
+	"ESP_NEEDS_FRAGMENTATION",
+	"ESP_SMALL_TTL"
 };
 
 static struct sfe_ipv6 __si6;
@@ -863,6 +868,10 @@
 		return sfe_ipv6_recv_tcp(si, skb, dev, len, iph, ihl, sync_on_find, l2_info);
 	}
 
+	if (IPPROTO_ESP == next_hdr) {
+		return sfe_ipv6_recv_esp(si, skb, dev, len, iph, ihl, sync_on_find, tun_outer);
+	}
+
 	if (IPPROTO_ICMPV6 == next_hdr) {
 		return sfe_ipv6_recv_icmp(si, skb, dev, len, iph, ihl);
 	}
@@ -1536,9 +1545,9 @@
 	reply_cm->top_interface_dev = NULL;
 
 #ifdef SFE_GRE_TUN_ENABLE
-	if (!(reply_cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_PASSTHROUGH)) {
+	if ((IPPROTO_GRE == tuple->protocol) && !(reply_cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_PASSTHROUGH)) {
 		rcu_read_lock();
-		reply_cm->proto = rcu_dereference(inet6_protos[tuple->protocol]);
+		reply_cm->proto = rcu_dereference(inet6_protos[IPPROTO_GRE]);
 		rcu_read_unlock();
 
 		if (unlikely(!reply_cm->proto)) {
@@ -1555,6 +1564,24 @@
 	}
 #endif
 
+	if ((IPPROTO_ESP == tuple->protocol) && !(reply_cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_PASSTHROUGH)) {
+		rcu_read_lock();
+		reply_cm->proto = rcu_dereference(inet6_protos[IPPROTO_ESP]);
+		rcu_read_unlock();
+
+		if (unlikely(!reply_cm->proto)) {
+			this_cpu_inc(si->stats_pcpu->connection_create_failures64);
+			spin_unlock_bh(&si->lock);
+			kfree(reply_cm);
+			kfree(original_cm);
+			kfree(c);
+			dev_put(src_dev);
+			dev_put(dest_dev);
+			DEBUG_WARN("sfe: ESP proto handler is not registered\n");
+			return -EPERM;
+		}
+	}
+
 	/*
 	 * Decapsulation path have proto set.
 	 * This is used to differentiate de/encap, and call protocol specific handler.
diff --git a/qca-nss-sfe/sfe_ipv6.h b/qca-nss-sfe/sfe_ipv6.h
index 9c78f1c..f9a33f8 100644
--- a/qca-nss-sfe/sfe_ipv6.h
+++ b/qca-nss-sfe/sfe_ipv6.h
@@ -307,6 +307,10 @@
 	SFE_IPV6_EXCEPTION_EVENT_GRE_IP_OPTIONS_OR_INITIAL_FRAGMENT,
 	SFE_IPV6_EXCEPTION_EVENT_GRE_SMALL_TTL,
 	SFE_IPV6_EXCEPTION_EVENT_GRE_NEEDS_FRAGMENTATION,
+	SFE_IPV6_EXCEPTION_EVENT_ESP_NO_CONNECTION,
+	SFE_IPV6_EXCEPTION_EVENT_ESP_IP_OPTIONS_OR_INITIAL_FRAGMENT,
+	SFE_IPV6_EXCEPTION_EVENT_ESP_NEEDS_FRAGMENTATION,
+	SFE_IPV6_EXCEPTION_EVENT_ESP_SMALL_TTL,
 	SFE_IPV6_EXCEPTION_EVENT_LAST
 };
 
diff --git a/qca-nss-sfe/sfe_ipv6_esp.c b/qca-nss-sfe/sfe_ipv6_esp.c
new file mode 100644
index 0000000..7a152e8
--- /dev/null
+++ b/qca-nss-sfe/sfe_ipv6_esp.c
@@ -0,0 +1,275 @@
+/*
+ * sfe_ipv6_esp.c
+ *	Shortcut forwarding engine - IPv6 ESP implementation
+ *
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/skbuff.h>
+#include <net/protocol.h>
+#include <net/ip6_checksum.h>
+#include <linux/etherdevice.h>
+#include <linux/version.h>
+
+#include "sfe_debug.h"
+#include "sfe_api.h"
+#include "sfe.h"
+#include "sfe_flow_cookie.h"
+#include "sfe_ipv6.h"
+#include "sfe_ipv6_esp.h"
+
+/*
+ * sfe_ipv6_recv_esp()
+ *	Handle ESP packet receives and forwarding
+ */
+int sfe_ipv6_recv_esp(struct sfe_ipv6 *si, struct sk_buff *skb, struct net_device *dev,
+				unsigned int len, struct ipv6hdr *iph, unsigned int ihl,
+				bool sync_on_find, bool tun_outer)
+{
+	struct sfe_ipv6_connection_match *cm;
+	struct sfe_ipv6_addr *src_ip;
+	struct sfe_ipv6_addr *dest_ip;
+	struct net_device *xmit_dev;
+	struct inet6_protocol *ipprot;
+	netdev_features_t features;
+	bool bridge_flow;
+	bool passthrough;
+	bool fast_xmit;
+	bool ret;
+
+	/*
+	 * Read the IP addresses from the iphdr; ESP has no L4 ports, so the lookup uses 0 for the src/dst ports.
+	 */
+	src_ip = (struct sfe_ipv6_addr *)iph->saddr.s6_addr32;
+	dest_ip = (struct sfe_ipv6_addr *)iph->daddr.s6_addr32;
+	rcu_read_lock();
+
+	/*
+	 * Look for a connection match.
+	 */
+#ifdef CONFIG_NF_FLOW_COOKIE
+	cm = si->sfe_flow_cookie_table[skb->flow_cookie & SFE_FLOW_COOKIE_MASK].match;
+	if (unlikely(!cm)) {
+		cm = sfe_ipv6_find_connection_match_rcu(si, dev, IPPROTO_ESP, src_ip, 0, dest_ip, 0);
+	}
+#else
+	cm = sfe_ipv6_find_connection_match_rcu(si, dev, IPPROTO_ESP, src_ip, 0, dest_ip, 0);
+#endif
+	if (unlikely(!cm)) {
+		rcu_read_unlock();
+		sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_ESP_NO_CONNECTION);
+
+		DEBUG_TRACE("no connection found for esp packet\n");
+		return 0;
+	}
+
+	/*
+	 * Validate the source interface.
+	 */
+	if (unlikely((cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_SRC_INTERFACE_CHECK) && (cm->match_dev != dev))) {
+		struct sfe_ipv6_connection *c = cm->connection;
+		int ret;
+
+		spin_lock_bh(&si->lock);
+		ret = sfe_ipv6_remove_connection(si, c);
+		spin_unlock_bh(&si->lock);
+
+		if (ret) {
+			sfe_ipv6_flush_connection(si, c, SFE_SYNC_REASON_FLUSH);
+		}
+		rcu_read_unlock();
+		sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_INVALID_SRC_IFACE);
+		DEBUG_TRACE("flush on wrong source interface check failure\n");
+		return 0;
+	}
+
+	passthrough = cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_PASSTHROUGH;
+	bridge_flow = !!(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_BRIDGE_FLOW);
+
+	/*
+	 * If our packet has been marked as "sync on find" we can't actually
+	 * forward it in the fast path, but now that we've found an associated
+	 * connection we need to sync its status before excepting it to the slow path,
+	 * unless it is a passthrough packet.
+	 * TODO: revisit to ensure that pass through traffic is not bypassing firewall for fragmented cases
+	 */
+	if (unlikely(sync_on_find) && !passthrough) {
+		sfe_ipv6_sync_status(si, cm->connection, SFE_SYNC_REASON_STATS);
+		rcu_read_unlock();
+
+		sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_ESP_IP_OPTIONS_OR_INITIAL_FRAGMENT);
+		DEBUG_TRACE("Sync on find\n");
+		return 0;
+	}
+
+	/*
+	 * Check if skb was cloned. If it was, unshare it.
+	 */
+	if (unlikely(skb_cloned(skb))) {
+		DEBUG_TRACE("%px: skb is a cloned skb\n", skb);
+		skb = skb_unshare(skb, GFP_ATOMIC);
+		if (!skb) {
+			DEBUG_WARN("Failed to unshare the cloned skb\n");
+			rcu_read_unlock();
+			return 0;
+		}
+
+		/*
+		 * Update the iphdr pointer with the unshared skb's data area.
+		 */
+		iph = (struct ipv6hdr *)skb->data;
+	}
+
+	/*
+	 * proto decap packet.
+	 *	Invoke the inet_protocol handler for delivery of the packet.
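+	 *	For decap flows cm->proto was resolved from inet6_protos[IPPROTO_ESP] at
+	 *	rule-creation time; it is NULL for passthrough flows.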
+	 */
+	ipprot = rcu_dereference(cm->proto);
+	if (likely(ipprot)) {
+		skb_reset_network_header(skb);
+		skb_pull(skb, ihl);
+		skb_reset_transport_header(skb);
+		xmit_dev = cm->xmit_dev;
+		skb->dev = xmit_dev;
+
+		ret = ipprot->handler(skb);
+		if (ret) {
+			rcu_read_unlock();
+			this_cpu_inc(si->stats_pcpu->packets_not_forwarded64);
+			DEBUG_TRACE("ESP handler returned error %u\n", ret);
+			return 0;
+		}
+
+		rcu_read_unlock();
+		this_cpu_inc(si->stats_pcpu->packets_forwarded64);
+		return 1;
+	}
+
+	/*
+	 * esp passthrough / ip local out scenarios
+	 */
+	/*
+	 * If our packet is larger than the MTU of the transmit interface then
+	 * we can't forward it easily.
+	 */
+	if (unlikely(len > cm->xmit_dev_mtu)) {
+		sfe_ipv6_sync_status(si, cm->connection, SFE_SYNC_REASON_STATS);
+		rcu_read_unlock();
+
+		sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_ESP_NEEDS_FRAGMENTATION);
+		DEBUG_TRACE("Larger than MTU\n");
+		return 0;
+	}
+
+	/*
+	 * Need to ensure that the hop limit is >= 2 for routed passthrough flows.
+	 */
+	if (!bridge_flow && (iph->hop_limit < 2) && passthrough) {
+		sfe_ipv6_sync_status(si, cm->connection, SFE_SYNC_REASON_STATS);
+		rcu_read_unlock();
+
+		sfe_ipv6_exception_stats_inc(si, SFE_IPV6_EXCEPTION_EVENT_ESP_SMALL_TTL);
+		DEBUG_TRACE("hop_limit too low\n");
+		return 0;
+	}
+
+	/*
+	 * Decrement the hop limit by 1 for routed flows that are not a tunnel outer header.
+	 */
+	iph->hop_limit = iph->hop_limit - (u8)(!bridge_flow && !tun_outer);
+
+	/*
+	 * Update DSCP
+	 */
+	if (unlikely(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_DSCP_REMARK)) {
+		sfe_ipv6_change_dsfield(iph, cm->dscp);
+	}
+
+	/*
+	 * Update traffic stats.
+	 */
+	atomic_inc(&cm->rx_packet_count);
+	atomic_add(len, &cm->rx_byte_count);
+
+	xmit_dev = cm->xmit_dev;
+	skb->dev = xmit_dev;
+
+	/*
+	 * Write the Layer 2 header.
+	 */
+	if (likely(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_WRITE_L2_HDR)) {
+		if (unlikely(!(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_WRITE_FAST_ETH_HDR))) {
+			dev_hard_header(skb, xmit_dev, ETH_P_IPV6, cm->xmit_dest_mac, cm->xmit_src_mac, len);
+		} else {
+			/*
+			 * For the simple case we write this really fast.
+			 */
+			struct ethhdr *eth = (struct ethhdr *)__skb_push(skb, ETH_HLEN);
+			eth->h_proto = htons(ETH_P_IPV6);
+			ether_addr_copy((u8 *)eth->h_dest, (u8 *)cm->xmit_dest_mac);
+			ether_addr_copy((u8 *)eth->h_source, (u8 *)cm->xmit_src_mac);
+		}
+	}
+
+	/*
+	 * Update priority of skb.
+	 */
+	if (unlikely(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_PRIORITY_REMARK)) {
+		skb->priority = cm->priority;
+	}
+
+	/*
+	 * Mark outgoing packet.
+	 */
+	if (unlikely(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_MARK)) {
+		skb->mark = cm->mark;
+	}
+
+	/*
+	 * For the first packet, check whether it qualifies for fast xmit.
+	 */
+	if (unlikely(!(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_FAST_XMIT_FLOW_CHECKED)
+				&& (cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_FAST_XMIT_DEV_ADMISSION))){
+		cm->features = netif_skb_features(skb);
+		if (likely(sfe_fast_xmit_check(skb, cm->features))) {
+			cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_FAST_XMIT;
+		}
+		cm->flags |= SFE_IPV6_CONNECTION_MATCH_FLAG_FAST_XMIT_FLOW_CHECKED;
+	}
+
+	features = cm->features;
+	fast_xmit = !!(cm->flags & SFE_IPV6_CONNECTION_MATCH_FLAG_FAST_XMIT);
+
+	rcu_read_unlock();
+	this_cpu_inc(si->stats_pcpu->packets_forwarded64);
+	prefetch(skb_shinfo(skb));
+
+	/*
+	 * Do a per-packet condition check before attempting to fast xmit the
+	 * packet.
+	 */
+	if (likely(fast_xmit && dev_fast_xmit(skb, xmit_dev, features))) {
+		this_cpu_inc(si->stats_pcpu->packets_fast_xmited64);
+		return 1;
+	}
+
+	/*
+	 * Mark that this packet has been fast forwarded.
+	 */
+	skb->fast_forwarded = 1;
+
+	dev_queue_xmit(skb);
+	return 1;
+}
diff --git a/qca-nss-sfe/sfe_ipv6_esp.h b/qca-nss-sfe/sfe_ipv6_esp.h
new file mode 100644
index 0000000..2870670
--- /dev/null
+++ b/qca-nss-sfe/sfe_ipv6_esp.h
@@ -0,0 +1,21 @@
+/*
+ * sfe_ipv6_esp.h
+ *	Shortcut forwarding engine - IPv6 ESP header file
+ *
+ * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+int sfe_ipv6_recv_esp(struct sfe_ipv6 *si, struct sk_buff *skb, struct net_device *dev, unsigned int len,
+			struct ipv6hdr *iph, unsigned int ihl, bool sync_on_find, bool tun_outer);
diff --git a/qca-nss-sfe/sfe_ipv6_udp.c b/qca-nss-sfe/sfe_ipv6_udp.c
index 445b43f..74802eb 100644
--- a/qca-nss-sfe/sfe_ipv6_udp.c
+++ b/qca-nss-sfe/sfe_ipv6_udp.c
@@ -3,7 +3,7 @@
  *	Shortcut forwarding engine file for IPv6 UDP
  *
  * Copyright (c) 2015-2016, 2019-2020, The Linux Foundation. All rights reserved.
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
  *
  * Permission to use, copy, modify, and/or distribute this software for any
  * purpose with or without fee is hereby granted, provided that the above
@@ -269,7 +269,7 @@
 	 * If our packet is larger than the MTU of the transmit interface then
 	 * we can't forward it easily.
 	 */
-	if (unlikely(len > cm->xmit_dev_mtu)) {
+	if (unlikely((len > cm->xmit_dev_mtu) && (!cm->up))) {
 		sfe_ipv6_sync_status(si, cm->connection, SFE_SYNC_REASON_STATS);
 		rcu_read_unlock();
 
diff --git a/qca-nss-sfe/tunnels/ipsec/Makefile b/qca-nss-sfe/tunnels/ipsec/Makefile
new file mode 100644
index 0000000..24d7038
--- /dev/null
+++ b/qca-nss-sfe/tunnels/ipsec/Makefile
@@ -0,0 +1,17 @@
+###################################################
+# Makefile for the NSS EIP IPSEC client
+###################################################
+
+SFE_IPSEC_MOD_NAME=qca-nss-sfe-xfrm
+
+ccflags-y += -Wall -Werror
+ccflags-y += -DNSS_SFE_XFRM_BUILD_ID=\"'Build_ID - $(shell date +'%m/%d/%y, %H:%M:%S') SoC=$(SoC)'\"
+
+obj-m += $(SFE_IPSEC_MOD_NAME).o
+
+$(SFE_IPSEC_MOD_NAME)-objs += sfe_xfrm.o
+$(SFE_IPSEC_MOD_NAME)-objs += sfe_xfrm_ctrl.o
+$(SFE_IPSEC_MOD_NAME)-objs += sfe_xfrm_enc.o
+$(SFE_IPSEC_MOD_NAME)-objs += sfe_xfrm_dec.o
+
+obj ?= .
diff --git a/qca-nss-sfe/tunnels/ipsec/sfe_xfrm.c b/qca-nss-sfe/tunnels/ipsec/sfe_xfrm.c
new file mode 100644
index 0000000..7e2405f
--- /dev/null
+++ b/qca-nss-sfe/tunnels/ipsec/sfe_xfrm.c
@@ -0,0 +1,97 @@
+/*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/if.h>
+#include <linux/list.h>
+#include <linux/completion.h>
+#include <linux/debugfs.h>
+
+#include "sfe_xfrm.h"
+
+/*
+ * global ipsec instance.
+ */
+struct sfe_xfrm g_sfe_xfrm = {{0}};
+
+/*
+ * sfe_xfrm_init()
+ *	Module initialization
+ */
+int __init sfe_xfrm_init(void)
+{
+	struct sfe_xfrm *g_xfrm = &g_sfe_xfrm;
+	int err = 0;
+
+	/*
+	 * Initialize the global object.
+	 */
+	spin_lock_init(&g_xfrm->lock);
+
+	g_xfrm->sa_cache = kmem_cache_create("sfe_xfrm_sa", sizeof(struct sfe_xfrm_sa), 0, 0, NULL);
+	if (!g_xfrm->sa_cache) {
+		pr_err("%px: Failed to allocate kmem cache for SA\n", g_xfrm);
+		return -1;
+	}
+
+	/*
+	 * Initialize the device database.
+	 */
+	INIT_LIST_HEAD(&g_xfrm->dev_head);
+
+	g_xfrm->dentry = debugfs_create_dir("nss-sfe-xfrm", NULL);
+	if (IS_ERR_OR_NULL(g_xfrm->dentry)) {
+		pr_err("%px: Failed to create debugfs directory\n", g_xfrm);
+		err = -EBADF;
+		goto fail_debugfs;
+	}
+
+	/*
+	 * XFRM offload initialization.
+	 */
+	sfe_xfrm_ctrl_init();
+
+	/*
+	 * Take a self reference on the module to avoid unload.
+	 */
+	__module_get(THIS_MODULE);
+
+	pr_info("sfe ipsec module loaded (%s)\n", NSS_SFE_XFRM_BUILD_ID);
+	return 0;
+
+fail_debugfs:
+	kmem_cache_destroy(g_xfrm->sa_cache);
+	return err;
+}
+
+/*
+ * sfe_xfrm_exit()
+ *	Module exit cleanup
+ */
+void __exit sfe_xfrm_exit(void)
+{
+	BUG_ON(module_refcount(THIS_MODULE));
+}
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("NSS SFE IPsec client");
+
+module_init(sfe_xfrm_init);
+module_exit(sfe_xfrm_exit);
diff --git a/qca-nss-sfe/tunnels/ipsec/sfe_xfrm.h b/qca-nss-sfe/tunnels/ipsec/sfe_xfrm.h
new file mode 100644
index 0000000..3688065
--- /dev/null
+++ b/qca-nss-sfe/tunnels/ipsec/sfe_xfrm.h
@@ -0,0 +1,219 @@
+/*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#ifndef __SFE_XFRM_H
+#define __SFE_XFRM_H
+
+#include <linux/rculist.h>
+#include <linux/netdevice.h>
+#include <linux/ip.h>
+#include <net/esp.h>
+#include <net/udp.h>
+#include <net/xfrm.h>
+#include <net/dst_cache.h>
+#include <crypto/skcipher.h>
+#include <crypto/hash.h>
+#include <crypto/ghash.h>
+
+#define SFE_XFRM_MAX_GCM_PAD 4
+#define SFE_XFRM_MAX_ESP_GCM_OVERHEAD (sizeof(struct ip_esp_hdr) + GCM_RFC4106_IV_SIZE \
+		+ SFE_XFRM_MAX_GCM_PAD + sizeof(struct ip_esp_trailer) \
+		+ GHASH_DIGEST_SIZE)		/* ESP + IV + PAD + TRAILER + ICV */
+#define SFE_XFRM_DEV_MAX_HEADROOM 128		/* Size of the buffer headroom.. */
+#define SFE_XFRM_DEV_MAX_TAILROOM 192		/* Size of the buffer tailroom. */
+#define SFE_XFRM_DEV_ARPHRD 31			/* ARP (iana.org) hardware type for an IPsec tunnel. */
+#define SFE_XFRM_MAX_STR_LEN 64			/* Maximum print length */
+
+/*
+ * SA flags.
+ */
+#define SFE_XFRM_SA_FLAG_ENC		BIT(1)	/* Encapsulation or Decapsulation */
+#define SFE_XFRM_SA_FLAG_IPV6		BIT(2)	/* IPv6 or IPv4 */
+#define SFE_XFRM_SA_FLAG_UDP		BIT(3)	/* IPv4 ESPinUDP or IPv4 ESP */
+#define SFE_XFRM_SA_FLAG_CP_TOS		BIT(4)	/* Copy DSCP from inner IPv4 to outer IPv4/IPv6 */
+#define SFE_XFRM_SA_FLAG_CP_DF		BIT(5)	/* Copy DF from inner IPv4 to Outer IPv4 */
+
+#define SFE_XFRM_DEV_STATS_DWORDS	sizeof(struct sfe_xfrm_dev_stats) / sizeof(uint64_t)
+#define SFE_XFRM_SA_STATS_DWORDS	sizeof(struct sfe_xfrm_sa_stats) / sizeof(uint64_t)
+
+struct sfe_xfrm_sa;
+
+/*
+ * IPsec SA statistics.
+ */
+struct sfe_xfrm_sa_stats {
+	uint64_t tx_pkts;		/* Packet enqueued to DMA */
+	uint64_t tx_bytes;		/* Bytes enqueued to DMA */
+	uint64_t rx_pkts;		/* Packet completed by DMA */
+	uint64_t rx_bytes;		/* Byte completed by DMA */
+
+	uint64_t fail_route;		/* Route not found error */
+	uint64_t fail_enqueue;		/* DMA transmit failure */
+	uint64_t fail_transform;	/* transformation error */
+	uint64_t fail_sp_alloc;		/* SP allocation error */
+	uint64_t fail_dst_cache;	/* DST cache not present */
+	uint64_t fail_seq;		/* Sequence overflowed */
+};
+
+/*
+ * SA header
+ */
+struct sfe_xfrm_sa_hdr {
+	__be32 src_ip[4];		/* Source IP address */
+	__be32 dst_ip[4];		/* Destination IP address */
+	__be32 spi;			/* ESP SPI value */
+	__be16 sport;			/* UDP source port */
+	__be16 dport;			/* UDP destination port */
+};
+
+/*
+ * SA state
+ */
+struct sfe_xfrm_sa_state {
+	uint32_t flags;				/* SA flags */
+
+	union {
+		struct sfe_xfrm_sa_state_enc {
+			void (*add_hdr)(struct sfe_xfrm_sa *sa, struct sk_buff *skb);
+			void (*encrypt_auth)(struct sfe_xfrm_sa *sa, struct sk_buff *skb);
+			void (*ip_send)(struct sfe_xfrm_sa *sa, struct sk_buff *skb);
+
+			struct dst_cache dst_cache;
+						/* Destination route cache */
+			uint32_t iv_seq[2];	/* Sequence used for explicit IV generation */
+			uint32_t salt[2];	/* Salt value used for explicit IV generation */
+			uint32_t nonce;		/* Nonce used for encapsulation IV */
+			uint32_t esp_seq;	/* ESP Sequence value */
+			uint16_t head_room;	/* Headroom required for encapsulation */
+			uint16_t tail_room;	/* Tailroom required for encapsulation */
+			uint16_t mtu_overhead;	/* Maximum encapsulation size added by SA */
+			uint8_t icv_len;	/* Encapsulation ICV length */
+			uint8_t iv_len;		/* Encapsulation IV length */
+			uint8_t blk_len;	/* Alignment required for data */
+			uint8_t esp_offset;	/* IV offset in header */
+		} enc;
+
+		struct sfe_xfrm_sa_state_dec {
+			int (*auth_decrypt)(struct sfe_xfrm_sa *sa, struct sk_buff *skb);
+
+			uint32_t nonce;		/* Nonce for the decapsulation operation */
+			uint8_t iv_len;		/* Decapsulation IV length */
+			uint8_t icv_len;	/* Decapsulation ICV length */
+		} dec;
+	};
+};
+
+/*
+ * IPsec SA object. Packet processing is synchronous and hence protected under the RCU lock.
+ */
+struct sfe_xfrm_sa {
+	struct sfe_xfrm_dev *dev;		/* Parent dev object */
+	struct net_device *ndev;		/* Linux netdevice representation for this device */
+	int ifindex;				/* Netdevice interface number */
+
+	struct xfrm_state *xs;			/* Offloaded xfrm state associated with the SA */
+	struct crypto_sync_skcipher *enc_tfm;	/* Cipher crypto context */
+	struct crypto_shash *auth_tfm;		/* Authentication context */
+
+	struct sfe_xfrm_sa_hdr hdr;		/* Encapsulation header template, pre-initialized during allocation */
+	struct sfe_xfrm_sa_state state;		/* SA state, pre-initialized during allocation */
+
+	struct sfe_xfrm_sa_stats __percpu *stats_pcpu;
+						/* SA statistics */
+};
+
+/*
+ * IPsec device statistics.
+ */
+struct sfe_xfrm_dev_stats {
+	uint64_t sa_alloc;		/* Number of (rekeyed) SA added */
+	uint64_t sa_free;		/* Number of SA removed */
+
+	/*
+	 * Tx is for encapsulation packets.
+	 */
+	uint64_t tx_pkts;		/* Encap Packet transmitted */
+	uint64_t tx_bytes;		/* Encap Bytes transmitted */
+	uint64_t tx_fail;		/* Encapsulation failure */
+	uint64_t tx_fail_sa;		/* sa not found failure */
+	uint64_t tx_linearize;		/* Encap Packet linearized */
+	uint64_t tx_fail_shared;	/* Shared SKB */
+	uint64_t tx_fail_hroom;		/* SKB headroom failure */
+	uint64_t tx_fail_troom;		/* SKB tailroom failure */
+	uint64_t tx_fail_linearize;	/* Linearization failure */
+
+	/*
+	 * Rx is for decapsulation packets.
+	 */
+	uint64_t rx_pkts;		/* Decap Packet received */
+	uint64_t rx_bytes;		/* Decap Bytes received */
+	uint64_t rx_dummy;		/* Dummy TFC packet received */
+	uint64_t rx_fail;		/* Decapsulation failure */
+	uint64_t rx_fail_sa;		/* sa not found failure */
+	uint64_t rx_linearize;		/* Decap Packet linearized */
+	uint64_t rx_fail_linearize;	/* Linearization failure */
+};
+
+/*
+ * IPsec device object.
+ */
+struct sfe_xfrm_dev {
+	struct net_device *ndev;	/* Linux netdevice representation for this device */
+	struct list_head node;		/* Node in Global device list */
+	struct kref ref;		/* References taken on Ipsec device object. */
+	int64_t xfrm_reqid;		/* Device ID */
+	struct sfe_xfrm_sa __rcu *sa;	/* Active encapsulation SA */
+	struct dentry *dentry;		/* Driver debugfs dentry */
+	struct sfe_xfrm_dev_stats __percpu *stats_pcpu;
+					/* Device statistics */
+};
+
+/*
+ * IPsec global module object.
+ */
+struct sfe_xfrm {
+	struct list_head dev_head;	/* Device database */
+
+	struct kmem_cache *sa_cache;	/* Kmem cache for SA memory */
+	struct dentry *dentry;		/* Driver debugfs dentry */
+
+	spinlock_t lock;		/* Lock for control path operation */
+};
+
+extern struct sfe_xfrm g_sfe_xfrm;	/* Global Driver object */
+
+/*
+ * Encapsulation Datapath function registered with Netdevice xmit.
+ */
+netdev_tx_t sfe_xfrm_enc(struct sk_buff *skb, struct net_device *ndev);
+void sfe_xfrm_add_hdr_v4(struct sfe_xfrm_sa *sa, struct sk_buff *skb);
+void sfe_xfrm_add_hdr_natt(struct sfe_xfrm_sa *sa, struct sk_buff *skb);
+void sfe_xfrm_encrypt_auth_gcm(struct sfe_xfrm_sa *sa, struct sk_buff *skb);
+void sfe_xfrm_ip4_send(struct sfe_xfrm_sa *sa, struct sk_buff *skb);
+
+/*
+ * Decapsulation Datapath function register with ESP protocol handler.
+ */
+int sfe_xfrm_dec_esp4(struct sk_buff *skb);
+int sfe_xfrm_dec_natt(struct sock *sk, struct sk_buff *skb);
+int sfe_xfrm_auth_decrypt_gcm(struct sfe_xfrm_sa *sa, struct sk_buff *skb);
+
+/*
+ * Control path initialization function.
+ */
+void sfe_xfrm_ctrl_init(void);
+
+#endif /* !__SFE_XFRM_H */
diff --git a/qca-nss-sfe/tunnels/ipsec/sfe_xfrm_ctrl.c b/qca-nss-sfe/tunnels/ipsec/sfe_xfrm_ctrl.c
new file mode 100644
index 0000000..321c78b
--- /dev/null
+++ b/qca-nss-sfe/tunnels/ipsec/sfe_xfrm_ctrl.c
@@ -0,0 +1,1253 @@
+/*
+ * Copyright (c) 2022-2023, Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <net/xfrm.h>
+#include <net/protocol.h>
+#include <net/ip6_route.h>
+#include <linux/inetdevice.h>
+#include <linux/debugfs.h>
+#include <linux/netfilter.h>
+#include <crypto/rng.h>
+#include <crypto/aes.h>
+#include <crypto/gcm.h>
+#include <crypto/ghash.h>
+#include "sfe_xfrm.h"
+
+/*
+ * Structure to store necessary fields during xfrm state walk.
+ */
+struct sfe_xfrm_iter_data {
+	struct net_device *ndev;
+	ssize_t max_len;
+	char *buf;
+};
+
+/*
+ * Structure to map crypto init function to xfrm state algo.
+ */
+struct sfe_xfrm_algo {
+	const char *algo_name;				/* Crypto algorithm name */
+	int (*crypto_init)(struct sfe_xfrm_sa *sa, struct xfrm_state *xs, bool is_decap);
+							/* pointer to crypto init */
+};
+
+static int sfe_xfrm_crypto_init_gcm(struct sfe_xfrm_sa *sa, struct xfrm_state *xs, bool is_decap);
+static void sfe_xfrm_dev_final(struct kref *kref);
+
+static struct sfe_xfrm_algo xfrm_algo[] = {
+	{.algo_name = "rfc4106(gcm(aes))", .crypto_init = sfe_xfrm_crypto_init_gcm},
+};
+
+/*
+ * Original ESP protocol handlers
+ */
+static const struct net_protocol *linux_esp_handler;
+
+/*
+ * sfe_xfrm_genkey()
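+ *	Derive the GHASH authentication subkey by encrypting an all-zero block with
+ *	a zero IV using the SA's AES-CTR cipher (i.e. H = AES-K(0^128), as used by GCM).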
+ */
+static int sfe_xfrm_genkey(struct crypto_sync_skcipher *etfm, uint8_t *key, uint16_t len)
+{
+	uint8_t iv[AES_BLOCK_SIZE] = {0};
+	struct scatterlist sg[1];
+	int ret;
+
+	/*
+	 * TODO: Move this to heap
+	 */
+	SYNC_SKCIPHER_REQUEST_ON_STACK(ereq, etfm);
+
+	sg_init_one(sg, key, len);
+	skcipher_request_set_sync_tfm(ereq, etfm);
+	skcipher_request_set_callback(ereq, 0, NULL, NULL);
+	skcipher_request_set_crypt(ereq, sg, sg, len, iv);
+
+	ret = crypto_skcipher_encrypt(ereq);
+	if (ret) {
+		pr_debug("%px: Failed to encrypt; err(%d)\n", etfm, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+/*
+ * sfe_xfrm_crypto_init_gcm()
+ *	Crypto init func for GCM.
+ */
+static int sfe_xfrm_crypto_init_gcm(struct sfe_xfrm_sa *sa, struct xfrm_state *xs, bool is_decap)
+{
+	uint8_t authkey[AES_BLOCK_SIZE] = {0};
+	uint8_t *cipher_key;
+	unsigned int cipher_keylen;
+	unsigned int key_len = 0;
+	int err;
+
+	key_len = ALIGN(xs->aead->alg_key_len, BITS_PER_BYTE) / BITS_PER_BYTE;
+
+	/*
+	 * Cipher key: for rfc4106(gcm(aes)) the xfrm key blob is the AES key followed by a 4-byte salt (nonce).
+	 */
+	cipher_keylen = key_len - 4; /* Subtract nonce */
+	cipher_key = xs->aead->alg_key;
+
+	/*
+	 * Allocate the cipher context
+	 */
+	sa->enc_tfm = crypto_alloc_sync_skcipher("ctr(aes)", 0, 0);
+	if (IS_ERR(sa->enc_tfm)) {
+		pr_err("%px: Error allocating tfm for skcipher: ctr(aes)\n", sa);
+		return PTR_ERR(sa->enc_tfm);
+	}
+
+	/*
+	 * Setup cipher keys
+	 */
+	err = crypto_sync_skcipher_setkey(sa->enc_tfm, cipher_key, cipher_keylen);
+	if (err) {
+		pr_err("%px: Failed to set the key for skcipher: ctr(aes)\n", sa);
+		goto fail2;
+	}
+
+	if (is_decap) {
+		struct sfe_xfrm_sa_state_dec *dec = &sa->state.dec;
+
+		dec->icv_len = xs->aead->alg_icv_len / BITS_PER_BYTE;
+		dec->iv_len = GCM_RFC4106_IV_SIZE;
+
+		memcpy(&dec->nonce, cipher_key + cipher_keylen, 4);
+
+		dec->auth_decrypt = sfe_xfrm_auth_decrypt_gcm;
+	} else {
+		struct sfe_xfrm_sa_state_enc *enc = &sa->state.enc;
+
+		enc->icv_len = xs->aead->alg_icv_len / BITS_PER_BYTE;
+		enc->iv_len = GCM_RFC4106_IV_SIZE;
+		enc->blk_len = ALIGN(crypto_sync_skcipher_blocksize(sa->enc_tfm), 4);
+		memcpy(&enc->nonce, cipher_key + cipher_keylen, 4);
+
+		get_random_bytes(&enc->salt, sizeof(enc->salt));
+		get_random_bytes(&enc->iv_seq, sizeof(enc->iv_seq));
+
+		enc->mtu_overhead += SFE_XFRM_MAX_ESP_GCM_OVERHEAD;
+		enc->encrypt_auth = sfe_xfrm_encrypt_auth_gcm;
+	}
+
+	/*
+	 * Generate authentication key
+	 */
+	err = sfe_xfrm_genkey(sa->enc_tfm, authkey, sizeof(authkey));
+	if (err) {
+		pr_warn("%px: Failed to generate authentication key for GCM\n", sa);
+		goto fail2;
+	}
+
+	/*
+	 * Allocate authentication context.
+	 * Use GHASH CE Driver.
+	 */
+	sa->auth_tfm = crypto_alloc_shash("__driver-ghash-ce", CRYPTO_ALG_INTERNAL, CRYPTO_ALG_INTERNAL);
+	if (IS_ERR(sa->auth_tfm)) {
+		pr_err("%px: Error allocating tfm for shash: ghash\n", sa);
+		err = PTR_ERR(sa->auth_tfm);
+		goto fail2;
+	}
+
+	/*
+	 * Setup authentication key
+	 */
+	err = crypto_shash_setkey(sa->auth_tfm, authkey, sizeof(authkey));
+	if (err) {
+		pr_err("%px: Failed to set the key for auth: ghash\n", sa);
+		goto fail1;
+	}
+
+	/*
+	 * Reset the memory that was allocated for confidentiality
+	 */
+	memzero_explicit(authkey, sizeof(authkey));
+	memzero_explicit(cipher_key, cipher_keylen);
+
+	pr_info("skcipher driver name: %s\n", crypto_tfm_alg_driver_name(crypto_skcipher_tfm((struct crypto_skcipher *)sa->enc_tfm)));
+	pr_info("shash driver name: %s\n", crypto_tfm_alg_driver_name(crypto_shash_tfm(sa->auth_tfm)));
+	return 0;
+
+fail1:
+	crypto_free_shash(sa->auth_tfm);
+fail2:
+	crypto_free_sync_skcipher(sa->enc_tfm);
+	return err;
+}
+
+/*
+ * sfe_xfrm_open_ndev()
+ *	Netdevice open handler.
+ */
+static int sfe_xfrm_open_ndev(struct net_device *ndev)
+{
+	netif_start_queue(ndev);
+	return 0;
+}
+
+/*
+ * sfe_xfrm_stop_ndev()
+ *	Netdevice stop handler.
+ */
+static int sfe_xfrm_stop_ndev(struct net_device *ndev)
+{
+	netif_stop_queue(ndev);
+	return 0;
+}
+
+/*
+ * sfe_xfrm_get_dev_stats()
+ *	Fetch all the device statistics.
+ */
+static void sfe_xfrm_get_dev_stats(struct sfe_xfrm_dev *dev, struct sfe_xfrm_dev_stats *stats)
+{
+	int cpu;
+	int i;
+
+	/*
+	 * All statistics are 64bit. So we can just iterate by words.
+	 */
+	for_each_possible_cpu(cpu) {
+		const struct sfe_xfrm_dev_stats *sp = per_cpu_ptr(dev->stats_pcpu, cpu);
+		uint64_t *stats_ptr = (uint64_t *)stats;
+		uint64_t *sp_ptr = (uint64_t *)sp;
+
+		for (i = 0; i < SFE_XFRM_DEV_STATS_DWORDS; i++, stats_ptr++, sp_ptr++) {
+			*stats_ptr += *sp_ptr;
+		}
+	}
+}
+
+/*
+ * sfe_xfrm_get_sa_stats()
+ *	Fetch all the SA statistics.
+ */
+static void sfe_xfrm_get_sa_stats(struct sfe_xfrm_sa *sa, struct sfe_xfrm_sa_stats *stats)
+{
+	int cpu;
+	int i;
+
+	/*
+	 * All statistics are 64bit. So we can just iterate by words.
+	 */
+	for_each_possible_cpu(cpu) {
+		const struct sfe_xfrm_sa_stats *sp = per_cpu_ptr(sa->stats_pcpu, cpu);
+		uint64_t *stats_ptr = (uint64_t *)stats;
+		uint64_t *sp_ptr = (uint64_t *)sp;
+
+		for (i = 0; i < SFE_XFRM_SA_STATS_DWORDS; i++, stats_ptr++, sp_ptr++) {
+			*stats_ptr += *sp_ptr;
+		}
+	}
+}
+
+/*
+ * sfe_xfrm_dump_sa_stats()
+ *	Print the SA statistics.
+ */
+static int sfe_xfrm_dump_sa_stats(struct xfrm_state *xs, int count, void *ptr)
+{
+	struct sfe_xfrm_iter_data *iter = (struct sfe_xfrm_iter_data *)ptr;
+	struct sfe_xfrm_sa_stats stats = {0};
+	struct sfe_xfrm_sa *sa;
+	ssize_t len = 0;
+	bool is_encap;
+	char *hdr;
+
+	/*
+	 * Non NSS offloaded SA
+	 */
+	if (!(xs->xflags & XFRM_STATE_OFFLOAD_NSS))
+		return 0;
+
+	/*
+	 * SA belongs to a different NETDEVICE
+	 */
+	if (xs->offload_dev != iter->ndev)
+		return 0;
+
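+	/*
+	 * Only IPv4 states are expected to be offloaded here (only the v4
+	 * datapath handlers are registered), so an IPv6 state is a bug.
+	 */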
+	BUG_ON(xs->props.family == AF_INET6);
+	sa = xs->data;
+
+	/*
+	 * We need to first fetch the stats values
+	 * from the SA object.
+	 */
+	sfe_xfrm_get_sa_stats(sa, &stats);
+
+	is_encap = (sa->state.flags & SFE_XFRM_SA_FLAG_ENC);
+	hdr = is_encap ? "Encap SA" : "Decap SA";
+
+	len += snprintf(iter->buf + len, iter->max_len - len,
+		"%s: (src:%pI4n dst:%pI4n spi:0x%X sport:%u dport:%u flags:0x%x)\n", hdr,
+		sa->hdr.src_ip, sa->hdr.dst_ip, htonl(sa->hdr.spi), htons(sa->hdr.sport),
+		htons(sa->hdr.dport), sa->state.flags);
+
+	len += snprintf(iter->buf + len, iter->max_len - len, "\tTx packets: %llu\n", stats.tx_pkts);
+	len += snprintf(iter->buf + len, iter->max_len - len, "\tTx bytes: %llu\n", stats.tx_bytes);
+	len += snprintf(iter->buf + len, iter->max_len - len, "\tRx packets: %llu\n", stats.rx_pkts);
+	len += snprintf(iter->buf + len, iter->max_len - len, "\tRx bytes: %llu\n", stats.rx_bytes);
+	len += snprintf(iter->buf + len, iter->max_len - len, "\tDst cache error: %llu\n", stats.fail_dst_cache);
+	len += snprintf(iter->buf + len, iter->max_len - len, "\tEnqueue error: %llu\n", stats.fail_enqueue);
+	len += snprintf(iter->buf + len, iter->max_len - len, "\tTransformation error: %llu\n", stats.fail_transform);
+	len += snprintf(iter->buf + len, iter->max_len - len, "\tRoute error: %llu\n", stats.fail_route);
+	len += snprintf(iter->buf + len, iter->max_len - len, "\tSP allocation error: %llu\n", stats.fail_sp_alloc);
+	len += snprintf(iter->buf + len, iter->max_len - len, "\tSequence error: %llu\n", stats.fail_seq);
+
+	iter->buf += len;
+	iter->max_len -= len;
+
+	/*
+	 * Stop processing if the available length is zero.
+	 */
+	return !iter->max_len;
+}
+
+/*
+ * sfe_xfrm_dump_dev_stats()
+ *	Print the Device statistics.
+ */
+static ssize_t sfe_xfrm_dump_dev_stats(struct sfe_xfrm_dev_stats *stats, char *buf, ssize_t max_len)
+{
+	ssize_t len = 0;
+
+	len = snprintf(buf, max_len, "SA Alloc: %llu\n", stats->sa_alloc);
+	len += snprintf(buf + len, max_len - len, "SA Free: %llu\n", stats->sa_free);
+	len += snprintf(buf + len, max_len - len, "Device Encapsulation Statistics:\n");
+	len += snprintf(buf + len, max_len - len, "\tTx packets: %llu\n", stats->tx_pkts);
+	len += snprintf(buf + len, max_len - len, "\tTx bytes: %llu\n", stats->tx_bytes);
+	len += snprintf(buf + len, max_len - len, "\tTx linearize: %llu\n", stats->tx_linearize);
+	len += snprintf(buf + len, max_len - len, "\tTx Fail: %llu\n", stats->tx_fail);
+	len += snprintf(buf + len, max_len - len, "\tTx Fail SA: %llu\n", stats->tx_fail_sa);
+	len += snprintf(buf + len, max_len - len, "\tTx Fail shared: %llu\n", stats->tx_fail_shared);
+	len += snprintf(buf + len, max_len - len, "\tTx Fail headroom: %llu\n", stats->tx_fail_hroom);
+	len += snprintf(buf + len, max_len - len, "\tTx Fail tailroom: %llu\n", stats->tx_fail_troom);
+	len += snprintf(buf + len, max_len - len, "\tTx Fail Linearize: %llu\n", stats->tx_fail_linearize);
+
+	len += snprintf(buf + len, max_len - len, "Device Decapsulation Statistics:\n");
+	len += snprintf(buf + len, max_len - len, "\tRx packets: %llu\n", stats->rx_pkts);
+	len += snprintf(buf + len, max_len - len, "\tRx bytes: %llu\n", stats->rx_bytes);
+	len += snprintf(buf + len, max_len - len, "\tRx linearize: %llu\n", stats->rx_linearize);
+	len += snprintf(buf + len, max_len - len, "\tRx dummy: %llu\n", stats->rx_dummy);
+	len += snprintf(buf + len, max_len - len, "\tRx Fail: %llu\n", stats->rx_fail);
+	len += snprintf(buf + len, max_len - len, "\tRx Fail SA: %llu\n", stats->rx_fail_sa);
+	len += snprintf(buf + len, max_len - len, "\tRx Fail Linearize: %llu\n", stats->rx_fail_linearize);
+
+	return len;
+}
+
+/*
+ * sfe_xfrm_dump_all_stats()
+ *	Read all device and SA statistics.
+ */
+static ssize_t sfe_xfrm_dump_all_stats(struct file *fp, char __user *ubuf, size_t sz, loff_t *ppos)
+{
+	struct sfe_xfrm_dev *dev = fp->private_data;
+	struct sfe_xfrm_dev_stats dev_stats = {0};
+	struct sfe_xfrm_iter_data iter = {0};
+	struct xfrm_state_walk walk;
+	int32_t sa_count;
+	ssize_t len = 0;
+	ssize_t max_len;
+	char *buf;
+
+	/*
+	 * Fetch the stats values from the device object.
+	 */
+	sfe_xfrm_get_dev_stats(dev, &dev_stats);
+	sa_count = dev_stats.sa_alloc - dev_stats.sa_free;
+	if (sa_count < 0)
+		sa_count = 0;
+
+	/*
+	 * Calculate required string buffer for stats.
+	 */
+	max_len = SFE_XFRM_DEV_STATS_DWORDS * SFE_XFRM_MAX_STR_LEN; /* Members */
+	max_len += SFE_XFRM_MAX_STR_LEN; /* Encap heading */
+	max_len += SFE_XFRM_MAX_STR_LEN; /* Decap heading */
+	max_len += SFE_XFRM_MAX_STR_LEN * sa_count; /* SA header */
+	max_len += SFE_XFRM_SA_STATS_DWORDS * SFE_XFRM_MAX_STR_LEN * sa_count; /* SA Members */
+
+	/*
+	 * Allocate the buffer.
+	 */
+	buf = vzalloc(max_len);
+	if (!buf) {
+		pr_warn("%px: failed to allocate print buffer (%zu)", dev, max_len);
+		return 0;
+	}
+
+	/*
+	 * Print the device statistics.
+	 */
+	len += sfe_xfrm_dump_dev_stats(&dev_stats, buf, max_len);
+
+	/*
+	 * No active SA.
+	 */
+	if (!sa_count) {
+		goto done;
+	}
+
+	/*
+	 * Initialize the walk object for ESP xfrm state.
+	 */
+	xfrm_state_walk_init(&walk, IPPROTO_ESP, NULL);
+
+	/*
+	 * We need the below fields to selectively
+	 * print the necessary SA stats.
+	 *
+	 * Since there is no direct way to pass these fields to the
+	 * (sfe_xfrm_dump_sa_stats) callback, we pass this as a pointer
+	 * (iter).
+	 */
+	iter.ndev = dev->ndev;
+	iter.buf = buf + len;
+	iter.max_len = max_len - len;
+
+	xfrm_state_walk(&init_net, &walk, sfe_xfrm_dump_sa_stats, &iter);
+	xfrm_state_walk_done(&walk, &init_net);
+	len = iter.buf - buf;
+
+done:
+	len = simple_read_from_buffer(ubuf, sz, ppos, buf, len);
+	vfree(buf);
+	return len;
+}
+
+/*
+ * sfe_xfrm_get_rtnl_stats()
+ *	Handler to fetch netdevice rtnl statistics.
+ */
+static void sfe_xfrm_get_rtnl_stats(struct net_device *ndev, struct rtnl_link_stats64 *rtnl_stats)
+{
+	struct sfe_xfrm_dev *dev = netdev_priv(ndev);
+	struct sfe_xfrm_dev_stats stats = {0};
+
+	memset(rtnl_stats, 0, sizeof(*rtnl_stats));
+	sfe_xfrm_get_dev_stats(dev, &stats);
+
+	rtnl_stats->tx_packets = stats.tx_pkts;
+	rtnl_stats->tx_bytes = stats.tx_bytes;
+	rtnl_stats->tx_dropped = stats.tx_fail;
+	rtnl_stats->rx_packets = stats.rx_pkts;
+	rtnl_stats->rx_bytes = stats.rx_bytes;
+	rtnl_stats->rx_dropped = stats.rx_fail;
+}
+
+/*
+ * sfe_xfrm_mtu_set()
+ *	Update device MTU.
+ */
+static int sfe_xfrm_mtu_set(struct net_device *ndev, int mtu)
+{
+	ndev->mtu = mtu;
+	return 0;
+}
+
+/*
+ * sfe_xfrm_xmit()
+ *	This is called for IPv4/v6 packets that are to be transformed.
+ */
+static int sfe_xfrm_xmit(struct net *net, struct sock *sk, struct sk_buff *skb)
+{
+	return dev_queue_xmit(skb);
+}
+
+/*
+ * sfe_xfrm_v4_output()
+ *	Called for IPv4 Plain text packets submitted for IPSec transformation.
+ */
+static int sfe_xfrm_v4_output(struct net *net, struct sock *sk, struct sk_buff *skb)
+{
+	struct sfe_xfrm_sa_state_enc *enc;
+	struct sfe_xfrm_sa *sa;
+	struct xfrm_state *xs;
+	bool expand_skb;
+
+	/*
+	 * No xfrm_state associated; Drop
+	 */
+	xs = skb_dst(skb)->xfrm;
+	if (!xs) {
+		pr_warn("%px: Failed to offload; No xfrm_state associated: drop\n", skb);
+		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTNOSTATES);
+		goto drop;
+	}
+
+	/*
+	 * Only process packets for XFRM state managed by IPsec offload
+	 */
+	if (!(xs->xflags & XFRM_STATE_OFFLOAD_NSS)) {
+		pr_debug("%px: state is not offloaded; xfrm_state %p :drop\n", skb, xs);
+		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTSTATEINVALID);
+		goto drop;
+	}
+
+	/*
+	 * Unshare the SKB as we will be modifying it.
+	 */
+	if (unlikely(skb_shared(skb))) {
+		skb = skb_unshare(skb, GFP_NOWAIT | __GFP_NOWARN);
+		if (!skb) {
+			goto drop;
+		}
+	}
+
+	skb->dev = xs->offload_dev;
+	sa = xs->data;
+	enc = &sa->state.enc;
+
+	/*
+	 * Expand the SKB if needed.
+	 */
+	expand_skb = (skb_headroom(skb) < enc->head_room) || (skb_tailroom(skb) < enc->tail_room);
+	if (expand_skb && pskb_expand_head(skb, enc->head_room, enc->tail_room, GFP_NOWAIT | __GFP_NOWARN)) {
+		pr_debug("%px: Failed to expand SKB head(%u) or tail(%u)\n", skb, skb_headroom(skb), skb_tailroom(skb));
+		XFRM_INC_STATS(net, LINUX_MIB_XFRMOUTERROR);
+		goto drop;
+	}
+
+	/*
+	 * Call the Post routing hooks.
+	 */
+	return NF_HOOK(NFPROTO_IPV4, NF_INET_POST_ROUTING, net, sk, skb, NULL, skb_dst(skb)->dev, sfe_xfrm_xmit);
+
+drop:
+	dev_kfree_skb_any(skb);
+	return -EINVAL;
+}
+
+/*
+ * sfe_xfrm_dev_free()
+ *	Free netdevice memory.
+ */
+static void sfe_xfrm_dev_free(struct net_device *ndev)
+{
+	struct sfe_xfrm_dev *dev = netdev_priv(ndev);
+
+	/*
+	 * There should be no active references at this point.
+	 */
+	WARN_ON(kref_read(&dev->ref));
+
+	free_percpu(dev->stats_pcpu);
+	debugfs_remove_recursive(dev->dentry);
+	free_netdev(ndev);
+	pr_info("%px: IPsec device freed\n", ndev);
+}
+
+/*
+ * IPsec device callbacks.
+ */
+static const struct net_device_ops xfrm_dev_ops = {
+	.ndo_open = sfe_xfrm_open_ndev,
+	.ndo_stop = sfe_xfrm_stop_ndev,
+	.ndo_start_xmit = sfe_xfrm_enc,
+	.ndo_get_stats64 = sfe_xfrm_get_rtnl_stats,
+	.ndo_change_mtu = sfe_xfrm_mtu_set,
+};
+
+/*
+ * sfe_xfrm_dev_setup()
+ *	Setup ipsec connection device.
+ */
+static void sfe_xfrm_dev_setup(struct net_device *ndev)
+{
+	ndev->addr_len = ETH_ALEN;
+	ndev->mtu = ETH_DATA_LEN - SFE_XFRM_DEV_MAX_HEADROOM;
+
+	ndev->hard_header_len = SFE_XFRM_DEV_MAX_HEADROOM;
+	ndev->needed_headroom = SFE_XFRM_DEV_MAX_HEADROOM;
+	ndev->needed_tailroom = SFE_XFRM_DEV_MAX_TAILROOM;
+	ndev->type = SFE_XFRM_DEV_ARPHRD;
+	ndev->ethtool_ops = NULL;
+	ndev->header_ops = NULL;
+	ndev->netdev_ops = &xfrm_dev_ops;
+	ndev->priv_destructor = sfe_xfrm_dev_free;
+
+	/*
+	 * Assign random ethernet address.
+	 */
+	random_ether_addr(ndev->dev_addr);
+	memset(ndev->broadcast, 0xff, ndev->addr_len);
+	memcpy(ndev->perm_addr, ndev->dev_addr, ndev->addr_len);
+}
+
+/*
+ * sfe_xfrm_udp_override()
+ */
+static void sfe_xfrm_udp_override(struct sfe_xfrm_sa *sa)
+{
+	struct sfe_xfrm *g_xfrm = &g_sfe_xfrm;
+	uint32_t src_ip, dst_ip;
+	struct udp_sock *up;
+	struct sock *sk;
+
+	/*
+	 * Only ESP-over-UDP decapsulation flows are processed.
+	 */
+	if (!(sa->state.flags & SFE_XFRM_SA_FLAG_UDP) || (sa->state.flags & SFE_XFRM_SA_FLAG_ENC))
+		return;
+
+	src_ip = sa->hdr.src_ip[0];
+	dst_ip = sa->hdr.dst_ip[0];
+
+	rcu_read_lock();
+	sk = __udp4_lib_lookup(&init_net, src_ip, sa->hdr.sport, dst_ip, sa->hdr.dport, 0, 0, &udp_table, NULL);
+	if (!sk) {
+		rcu_read_unlock();
+		pr_err("%px: Failed to lookup UDP socket dst(%pI4h) dport(0x%X)\n", g_xfrm, sa->hdr.dst_ip, sa->hdr.dport);
+		return;
+	}
+
+	up = udp_sk(sk);
+	if (up->encap_type != UDP_ENCAP_ESPINUDP) {
+		rcu_read_unlock();
+		pr_err("%px: Socket type is not UDP_ENCAP_ESPINUDP (%u)\n", up, up->encap_type);
+		return;
+	}
+
+	if (READ_ONCE(up->encap_rcv) != sfe_xfrm_dec_natt) {
+		xchg(&up->encap_rcv, sfe_xfrm_dec_natt);
+		pr_debug("%px: Overridden socket encap handler\n", up);
+	}
+
+	rcu_read_unlock();
+}
+
+/*
+ * sfe_xfrm_crypto_init()
+ *	Allocate skcipher and shash crypto transform.
+ */
+static int sfe_xfrm_crypto_init(struct xfrm_state *xs, struct sfe_xfrm_sa *sa, bool is_decap)
+{
+	struct sfe_xfrm_algo *xalg = xfrm_algo;
+	char *alg_name;
+	uint32_t i;
+
+	/*
+	 * TODO: Only supports combined mode Cipher/authentication
+	 */
+	alg_name = xs->aead ? xs->aead->alg_name : NULL;
+
+	/*
+	 * Perform algorithm specific crypto initialisation.
+	 */
+	for (i = 0; i < ARRAY_SIZE(xfrm_algo); i++, xalg++) {
+		if (alg_name && !strncmp(xalg->algo_name, alg_name, strlen(xalg->algo_name))) {
+			return xalg->crypto_init(sa, xs, is_decap);
+		}
+	}
+
+	pr_warn("%px: Unsupported algorithm for IPsec\n", xs);
+	return -1;
+}
+
+/*
+ * sfe_xfrm_sa_add()
+ *	Add new IPsec SA for given xfrm state.
+ */
+static int sfe_xfrm_sa_add(struct sfe_xfrm_dev *dev, struct xfrm_state *xs)
+{
+	struct sfe_xfrm_dev_stats *dev_stats = this_cpu_ptr(dev->stats_pcpu);
+	struct sfe_xfrm *g_xfrm = &g_sfe_xfrm;
+	struct net_device *ndev = dev->ndev;
+	struct dst_cache *dst_cache = NULL;
+	struct sfe_xfrm_sa *sa;
+	bool is_decap = false;
+	bool is_natt = false;
+	int err;
+
+	/*
+	 * SA flag shouldn't be set
+	 */
+	BUG_ON(xs->xflags & XFRM_STATE_OFFLOAD_NSS);
+	BUG_ON(xs->props.family == AF_INET6);
+
+	/*
+	 * SA object allocation.
+	 */
+	sa = kmem_cache_alloc(g_xfrm->sa_cache, GFP_KERNEL | __GFP_ZERO);
+	if (!sa) {
+		pr_warn("%px: Failed to allocate SA\n", ndev);
+		return -ENOMEM;
+	}
+
+	sa->stats_pcpu = alloc_percpu_gfp(struct sfe_xfrm_sa_stats, GFP_KERNEL | __GFP_ZERO);
+	if (!sa->stats_pcpu) {
+		pr_err("%px: Failed to allocate stats memory for SA\n", ndev);
+		err = -ENOMEM;
+		goto fail_pcpu;
+	}
+
+	/*
+	 * Initialise the SA object and find the SA direction.
+	 * For a decapsulation SA, the destination address is local.
+	 */
+	if (xs->props.family == AF_INET) {
+		struct sfe_xfrm_sa_state_enc *enc;
+		struct net_device *local_dev;
+
+		sa->hdr.src_ip[0] = xs->props.saddr.a4;
+		sa->hdr.dst_ip[0] = xs->id.daddr.a4;
+
+		/*
+		 * Check for NAT-T flow
+		 */
+		is_natt = !!xs->encap;
+		sa->state.flags |= is_natt ? SFE_XFRM_SA_FLAG_UDP : 0;
+
+		/*
+		 * Find the SA direction (encap or decap)
+		 */
+		local_dev = ip_dev_find(&init_net, xs->id.daddr.a4);
+		if (local_dev) {
+			dev_put(local_dev);
+			is_decap = true;
+
+			goto init;
+		}
+
+		/*
+		 * Destination is remote hence this is an encapsulation SA
+		 */
+		enc = &sa->state.enc;
+		sa->state.flags |= SFE_XFRM_SA_FLAG_ENC;
+		dst_cache = &enc->dst_cache;
+
+		err = dst_cache_init(dst_cache, GFP_KERNEL);
+		if (err) {
+			pr_err("%px: Failed to initialize dst for SA\n", ndev);
+			goto fail_dst;
+		}
+
+		enc->mtu_overhead = sizeof(struct iphdr);
+		enc->head_room = SFE_XFRM_DEV_MAX_HEADROOM;
+		enc->tail_room = SFE_XFRM_DEV_MAX_TAILROOM;
+		enc->esp_seq = 1;
+		enc->esp_offset = sizeof(struct iphdr);
+		enc->add_hdr = sfe_xfrm_add_hdr_v4;
+		enc->ip_send = sfe_xfrm_ip4_send;
+		if (is_natt) {
+			enc->mtu_overhead += sizeof(struct udphdr);
+			enc->esp_offset += sizeof(struct udphdr);
+			enc->add_hdr = sfe_xfrm_add_hdr_natt;
+		}
+	}
+
+init:
+	/*
+	 * Allocate the transform pointer for the
+	 * skcipher and shash.
+	 */
+	err = sfe_xfrm_crypto_init(xs, sa, is_decap);
+	if (err) {
+		pr_err("%px: Crypto Initialisation failed for SA\n", sa);
+		goto fail_crypto;
+	}
+
+	/*
+	 * Dereference: sfe_xfrm_sa_del()
+	 */
+	kref_get(&dev->ref);
+
+	sa->dev = dev;
+	sa->ndev = ndev;
+	sa->ifindex = ndev->ifindex;
+	sa->xs = xs;
+	sa->hdr.spi = xs->id.spi;
+	sa->hdr.sport = is_natt ? xs->encap->encap_sport : 0;
+	sa->hdr.dport = is_natt ? xs->encap->encap_dport : 0;
+
+	sfe_xfrm_udp_override(sa);
+
+	/*
+	 * Make this SA active. For the old SA, we wait for all RCU readers during SA deletion.
+	 */
+	if (!is_decap) {
+		uint32_t new_mtu = ETH_DATA_LEN - sa->state.enc.mtu_overhead;
+
+		rtnl_lock();
+		dev_set_mtu(ndev, new_mtu);
+		rtnl_unlock();
+
+		spin_lock_bh(&g_xfrm->lock);
+		rcu_assign_pointer(dev->sa, sa);
+		spin_unlock_bh(&g_xfrm->lock);
+	}
+
+	WRITE_ONCE(xs->data, sa);
+	dev_stats->sa_alloc++;
+	return 0;
+
+fail_crypto:
+	if (dst_cache) {
+		dst_cache_destroy(dst_cache);
+	}
+
+fail_dst:
+	free_percpu(sa->stats_pcpu);
+
+fail_pcpu:
+	kmem_cache_free(g_xfrm->sa_cache, sa);
+	return err;
+}
+
+/*
+ * sfe_xfrm_sa_del()
+ *	Delete existing IPsec SA for given xfrm state.
+ */
+static void sfe_xfrm_sa_del(struct sfe_xfrm_dev *dev, struct xfrm_state *xs)
+{
+	struct sfe_xfrm_dev_stats *dev_stats;
+	struct sfe_xfrm *g_xfrm = &g_sfe_xfrm;
+	struct sfe_xfrm_sa *sa, *dev_sa;
+
+	sa = READ_ONCE(xs->data);
+	BUG_ON(!sa);
+
+	/*
+	 * SA flag should be set
+	 */
+	BUG_ON((xs->xflags & XFRM_STATE_OFFLOAD_NSS) == 0);
+
+	/*
+	 * If the SA being deleted is the active
+	 * encap SA, set dev->sa to NULL.
+	 *
+	 * TODO: Change it to reference counting
+	 */
+	spin_lock_bh(&g_xfrm->lock);
+	dev_sa = rcu_dereference_protected(dev->sa, lockdep_is_held(&g_xfrm->lock));
+	if (dev_sa == sa) {
+		rcu_assign_pointer(dev->sa, NULL);
+	}
+
+	spin_unlock_bh(&g_xfrm->lock);
+	synchronize_rcu();
+
+	/*
+	 * SA free
+	 */
+	dev_stats = this_cpu_ptr(dev->stats_pcpu);
+	dev_stats->sa_free++;
+
+	sa->xs = NULL;
+
+	/*
+	 * Deallocate the Crypto resources
+	 */
+	crypto_free_sync_skcipher(sa->enc_tfm);
+	crypto_free_shash(sa->auth_tfm);
+
+	/*
+	 * Reference: sfe_xfrm_sa_add
+	 */
+	kref_put(&dev->ref, sfe_xfrm_dev_final);
+	sa->dev = NULL;
+
+	if (sa->state.flags & SFE_XFRM_SA_FLAG_ENC) {
+		dst_cache_destroy(&sa->state.enc.dst_cache);
+	}
+
+	free_percpu(sa->stats_pcpu);
+	kmem_cache_free(g_xfrm->sa_cache, sa);
+}
+
+/*
+ * IPsec device stats callback.
+ */
+const struct file_operations sfe_xfrm_dev_file_ops = {
+	.open = simple_open,
+	.llseek = default_llseek,
+	.read = sfe_xfrm_dump_all_stats,
+};
+
+/*
+ * sfe_xfrm_dev_add_ref()
+ *	Add new IPsec device for given reqid.
+ */
+static struct sfe_xfrm_dev *sfe_xfrm_dev_add_ref(int64_t devid)
+{
+	struct sfe_xfrm *g_xfrm = &g_sfe_xfrm;
+	struct sfe_xfrm_dev *dev;
+	struct list_head *cur;
+	struct net_device *ndev;
+	int status;
+
+	/*
+	 * Fetch the net_device from the db for the given ID.
+	 */
+	spin_lock_bh(&g_xfrm->lock);
+	list_for_each(cur, &g_xfrm->dev_head) {
+		dev = list_entry(cur, struct sfe_xfrm_dev, node);
+
+		/*
+		 * Ensure that we do not take a reference
+		 * if the final release is already executing.
+		 */
+		if ((dev->xfrm_reqid == devid) && kref_get_unless_zero(&dev->ref)) {
+			break;
+		}
+	}
+
+	spin_unlock_bh(&g_xfrm->lock);
+
+	/*
+	 * Entry is found
+	 */
+	if (cur != &g_xfrm->dev_head) {
+		return dev;
+	}
+
+	/*
+	 * No netdevice exists for this ID.
+	 * Allocate a new IPsec device for the given XFRM reqid.
+	 */
+	ndev = alloc_netdev(sizeof(*dev), "ipsectun%d", NET_NAME_ENUM, sfe_xfrm_dev_setup);
+	if (!ndev) {
+		pr_err("%px: Failed to allocate IPsec device\n", g_xfrm);
+		return NULL;
+	}
+
+	/*
+	 * Initialize device private structure.
+	 */
+	dev = netdev_priv(ndev);
+
+	dev->ndev = ndev;
+	dev->xfrm_reqid = devid;
+	rcu_assign_pointer(dev->sa, NULL);
+
+	dev->stats_pcpu = alloc_percpu_gfp(struct sfe_xfrm_dev_stats, GFP_KERNEL | __GFP_ZERO);
+	if (!dev->stats_pcpu) {
+		pr_err("%px: Failed to allocate stats memory for encap\n", ndev);
+		ndev->priv_destructor(ndev);
+		return NULL;
+	}
+
+	rtnl_lock();
+
+	/*
+	 * Register netdevice with kernel.
+	 * Note: Linux will invoke the destructor upon failure
+	 */
+	status = register_netdevice(ndev);
+	if (status < 0) {
+		pr_err("%px: Failed to register netdevice, error(%d)\n", ndev, status);
+		rtnl_unlock();
+		return NULL;
+	}
+
+	/*
+	 * Set netdevice to UP state.
+	 */
+	status = dev_open(ndev, NULL);
+	if (status < 0) {
+		pr_err("%px: Failed to open netdevice, error(%d)\n", ndev, status);
+		unregister_netdevice(ndev);
+		rtnl_unlock();
+		return NULL;
+	}
+
+	rtnl_unlock();
+
+	dev->dentry = debugfs_create_file(ndev->name, S_IRUGO, g_xfrm->dentry, dev, &sfe_xfrm_dev_file_ops);
+	if (IS_ERR_OR_NULL(dev->dentry)) {
+		pr_warn("%px: Failed to allocate dentry for %s\n", ndev, ndev->name);
+	}
+
+	kref_init(&dev->ref);
+
+	/*
+	 * Add the net_device entry into the db.
+	 */
+	spin_lock_bh(&g_xfrm->lock);
+	list_add(&dev->node, &g_xfrm->dev_head);
+	spin_unlock_bh(&g_xfrm->lock);
+
+	return dev;
+}
+
+/*
+ * sfe_xfrm_dev_final()
+ *	Delete existing IPsec device.
+ */
+static void sfe_xfrm_dev_final(struct kref *kref)
+{
+	struct sfe_xfrm_dev *dev = container_of(kref, struct sfe_xfrm_dev, ref);
+	struct sfe_xfrm *g_xfrm = &g_sfe_xfrm;
+	struct net_device *ndev = dev->ndev;
+
+	/*
+	 * Delete the net_device entry from the db.
+	 */
+	BUG_ON(dev->xfrm_reqid < 0);
+
+	spin_lock_bh(&g_xfrm->lock);
+	list_del_init(&dev->node);
+	spin_unlock_bh(&g_xfrm->lock);
+
+	/*
+	 * Bring down the device and unregister from linux.
+	 */
+	unregister_netdev(ndev);
+}
+
+/*
+ * sfe_xfrm_esp_init_state()
+ *	Initialize IPsec xfrm state of type ESP.
+ */
+static int sfe_xfrm_esp_init_state(struct xfrm_state *xs)
+{
+	struct sfe_xfrm_dev *dev;
+	int ret;
+
+	/*
+	 * SA flag shouldn't be set
+	 */
+	BUG_ON(xs->xflags & XFRM_STATE_OFFLOAD_NSS);
+
+	/*
+	 * Verify whether the xfrm state can be offloaded.
+	 */
+	if (xs->props.mode != XFRM_MODE_TUNNEL) {
+		pr_warn("%px: SFE/IPsec transport mode not supported\n", xs);
+		return -ENOTSUPP;
+	}
+
+	if (xs->encap && (xs->encap->encap_type != UDP_ENCAP_ESPINUDP)) {
+		pr_warn("%px: SFE/IPsec UDP encap type(%d) not supported\n", xs, xs->encap->encap_type);
+		return -ENOTSUPP;
+	}
+
+	/*
+	 * Before adding a new SA object, run through the db and check
+	 * whether a net device has already been created for the given reqid.
+	 * If not, create one.
+	 */
+	dev = sfe_xfrm_dev_add_ref(xs->props.reqid);
+	if (!dev) {
+		pr_err("%px: Unable to fetch/add netdevice for this id %d\n", xs, xs->props.reqid);
+		return -1;
+	}
+
+	/*
+	 * Create and add the SA object.
+	 */
+	ret = sfe_xfrm_sa_add(dev, xs);
+	if (ret < 0) {
+		pr_warn("%px: unable to offload xfrm_state\n", xs);
+
+		/*
+		 * Reference: sfe_xfrm_dev_add_ref()
+		 */
+		kref_put(&dev->ref, sfe_xfrm_dev_final);
+		return ret;
+	}
+
+	xs->offload_dev = dev->ndev;
+	xs->xflags |= XFRM_STATE_OFFLOAD_NSS;
+	return 0;
+}
+
+/*
+ * sfe_xfrm_esp_deinit_state()
+ *	Destroy IPsec xfrm state of type ESP.
+ */
+static void sfe_xfrm_esp_deinit_state(struct xfrm_state *xs)
+{
+	struct net_device *ndev;
+	struct sfe_xfrm_dev *dev;
+
+	/*
+	 * Check if the xfrm state is already offloaded or not.
+	 */
+	if (unlikely(!(xs->xflags & XFRM_STATE_OFFLOAD_NSS))) {
+		pr_warn("%px: xfrm_state is not offloaded\n", xs);
+		return;
+	}
+
+	/*
+	 * Fetch the net_device from the xfrm state.
+	 */
+	ndev = xs->offload_dev;
+	dev = netdev_priv(ndev);
+
+	sfe_xfrm_sa_del(dev, xs);
+
+	/*
+	 * Reference: sfe_xfrm_dev_add_ref()
+	 */
+	kref_put(&dev->ref, sfe_xfrm_dev_final);
+	return;
+}
+
+/*
+ * sfe_xfrm_esp_get_mtu()
+ *	Get mtu for inner packet.
+ */
+static uint32_t sfe_xfrm_esp_get_mtu(struct xfrm_state *xs, int mtu)
+{
+	struct net_device *ndev;
+
+	/*
+	 * WARN_ON if the xfrm_state is not offloaded.
+	 */
+	WARN_ON(!(xs->xflags & XFRM_STATE_OFFLOAD_NSS));
+
+	/*
+	 * Since each encap SA is tracked using a unique netdevice,
+	 * the net_device MTU is the same as the SA MTU.
+	 */
+	ndev = xs->offload_dev;
+	BUG_ON(!ndev);
+
+	/*
+	 * FIXME: return the overhead value
+	 */
+	return ndev->mtu;
+}
+
+/*
+ * Trap IPv4 packets to be sent for IPsec encapsulation.
+ */
+static struct xfrm_state_afinfo xfrm_v4_afinfo = {
+	.family = AF_INET,
+	.proto = IPPROTO_IPIP,
+	.output = sfe_xfrm_v4_output,
+	.output_finish = NULL,
+	.extract_input = NULL,
+	.extract_output = NULL,
+	.transport_finish = NULL,
+	.local_error = NULL,
+};
+
+/*
+ * ESP proto specific init/de-init handlers for ipv4.
+ */
+static const struct xfrm_type xfrm_v4_type = {
+	.description = "SFE ESP4",
+	.owner = THIS_MODULE,
+	.proto = IPPROTO_ESP,
+	.flags = XFRM_TYPE_REPLAY_PROT,
+	.init_state = sfe_xfrm_esp_init_state,
+	.destructor = sfe_xfrm_esp_deinit_state,
+	.get_mtu = sfe_xfrm_esp_get_mtu,
+	.input = NULL,
+	.output = NULL,
+};
+
+/*
+ * IPv4 ESP handler
+ */
+static struct net_protocol esp_protocol = {
+	.handler = sfe_xfrm_dec_esp4,
+	.no_policy = 1,
+	.netns_ok  = 1,
+};
+
+/*
+ * sfe_xfrm_override_afinfo()
+ *	Override the native linux afinfo object.
+ */
+static void sfe_xfrm_override_afinfo(uint16_t family)
+{
+	const struct xfrm_type *type_dstopts, *type_routing;
+	const struct xfrm_type *type_ipip, *type_ipv6;
+	const struct xfrm_type *type_ah, *type_comp;
+	struct xfrm_state_afinfo *afinfo = NULL;
+	const struct xfrm_type *base;
+
+	/*
+	 * Override ESP type.
+	 */
+	if (family == AF_INET) {
+		base = &xfrm_v4_type;
+		afinfo = xfrm_state_update_afinfo(AF_INET, &xfrm_v4_afinfo);
+	}
+
+	/*
+	 * TODO: Add ipv6 support
+	 */
+	BUG_ON(family == AF_INET6);
+
+	xfrm_register_type(base, family);
+
+	type_ah = afinfo->type_ah;
+	type_comp = afinfo->type_comp;
+	type_ipip = afinfo->type_ipip;
+	type_ipv6 = afinfo->type_ipip6;
+	type_dstopts = afinfo->type_dstopts;
+	type_routing = afinfo->type_routing;
+
+	/*
+	 * Register types
+	 *
+	 * Propagate the xfrm_type handlers registered on the
+	 * old afinfo object into the new one.
+	 */
+	if (type_ah) {
+		xfrm_register_type(type_ah, family);
+	}
+
+	if (type_comp) {
+		xfrm_register_type(type_comp, family);
+	}
+
+	if (type_ipip) {
+		xfrm_register_type(type_ipip, family);
+	}
+
+	if (type_ipv6) {
+		xfrm_register_type(type_ipv6, family);
+	}
+
+	if (type_dstopts) {
+		xfrm_register_type(type_dstopts, family);
+	}
+
+	if (type_routing) {
+		xfrm_register_type(type_routing, family);
+	}
+}
+
+/*
+ * sfe_xfrm_ctrl_init()
+ *	Initialization function.
+ */
+void sfe_xfrm_ctrl_init(void)
+{
+	int err;
+
+	err = inet_update_protocol(&esp_protocol, IPPROTO_ESP, &linux_esp_handler);
+	BUG_ON(err < 0);
+
+	/*
+	 * Override the xfrm_state afinfo.
+	 */
+	sfe_xfrm_override_afinfo(AF_INET);
+}
diff --git a/qca-nss-sfe/tunnels/ipsec/sfe_xfrm_dec.c b/qca-nss-sfe/tunnels/ipsec/sfe_xfrm_dec.c
new file mode 100644
index 0000000..2332eac
--- /dev/null
+++ b/qca-nss-sfe/tunnels/ipsec/sfe_xfrm_dec.c
@@ -0,0 +1,366 @@
+/*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <crypto/aes.h>
+#include <crypto/gcm.h>
+#include <crypto/algapi.h>
+
+#include "sfe_xfrm.h"
+
+/*
+ * sfe_xfrm_dec_esp()
+ *	Decrypt and remove ESP header.
+ */
+static void sfe_xfrm_dec_esp(struct sfe_xfrm_sa *sa, struct sk_buff *skb)
+{
+	struct sfe_xfrm_dev_stats *dev_stats;
+	struct sfe_xfrm_sa_stats *sa_stats;
+	struct sfe_xfrm_sa_state_dec *dec;
+	struct xfrm_state *xs = sa->xs;
+	struct ip_esp_trailer *espt;
+	unsigned int inner_len;
+	struct sec_path *sp;
+
+	dev_stats = this_cpu_ptr(sa->dev->stats_pcpu);
+	sa_stats = this_cpu_ptr(sa->stats_pcpu);
+	dec = &sa->state.dec;
+
+	/*
+	 * Update Rx statistics.
+	 */
+	sa_stats->rx_pkts++;
+	sa_stats->rx_bytes += skb->len;
+
+	/*
+	 * Decrypt the packet.
+	 */
+	if (dec->auth_decrypt(sa, skb)) {
+		pr_debug("%p: Failed to decrypt packet\n", sa);
+		goto drop;
+	}
+
+	/*
+	 * Remove the ESP header.
+	 */
+	skb_pull(skb, sizeof(struct ip_esp_hdr) + dec->iv_len);
+	skb_reset_network_header(skb);
+
+	/*
+	 * Read ESP trailer.
+	 * TODO: Add pad verification logic here
+	 */
+	espt = (struct ip_esp_trailer *)(skb_tail_pointer(skb) - dec->icv_len - sizeof(*espt));
+	switch (espt->next_hdr) {
+	case IPPROTO_IPIP:
+		skb->protocol = htons(ETH_P_IP);
+		skb_set_transport_header(skb, sizeof(struct iphdr));
+		inner_len = ntohs(ip_hdr(skb)->tot_len);
+		break;
+
+	case IPPROTO_IPV6:
+		skb->protocol = htons(ETH_P_IPV6);
+		skb_set_transport_header(skb, sizeof(struct ipv6hdr));
+		inner_len = ntohs(ipv6_hdr(skb)->payload_len) + sizeof(struct ipv6hdr);
+		break;
+
+	default:
+		/*
+		 * This may be a dummy packet; consume it.
+		 */
+		dev_stats->rx_dummy++;
+		consume_skb(skb);
+		return;
+	}
+
+	/*
+	 * Reset General SKB fields for further processing.
+	 */
+	skb_scrub_packet(skb, false);
+	skb_reset_mac_header(skb);
+	skb->ip_summed = CHECKSUM_NONE;
+	skb->dev = sa->ndev;
+	skb->skb_iif = sa->ifindex;
+	skb_trim(skb, inner_len);
+
+	/*
+	 * Linux requires sp in SKB when xfrm is enabled.
+	 */
+	sp = secpath_set(skb);
+	if (!sp) {
+		sa_stats->fail_sp_alloc++;
+		goto drop;
+	}
+
+	xfrm_state_hold(xs);
+	sp->xvec[sp->len++] = xs;
+
+	/*
+	 * Update Tx statistics.
+	 */
+	sa_stats->tx_pkts++;
+	sa_stats->tx_bytes += skb->len;
+	dev_stats->rx_pkts++;
+	dev_stats->rx_bytes += skb->len;
+
+	netif_receive_skb(skb);
+	return;
+
+drop:
+	sa_stats->fail_transform++;
+	dev_kfree_skb_any(skb);
+}
+
+/*
+ * sfe_xfrm_auth_decrypt_gcm()
+ *	Decrypt the SKB. SKB data must be pointing to ESP header.
+ */
+int sfe_xfrm_auth_decrypt_gcm(struct sfe_xfrm_sa *sa, struct sk_buff *skb)
+{
+	struct sfe_xfrm_sa_state_dec *dec = &sa->state.dec;
+	struct crypto_sync_skcipher *etfm = sa->enc_tfm;
+	struct crypto_shash *atfm = sa->auth_tfm;
+	uint8_t zero_enc[AES_BLOCK_SIZE] = {0};
+	uint8_t calc_hmac[GHASH_BLOCK_SIZE];
+	uint8_t icv_len = dec->icv_len;
+	uint32_t iv[4], *data;
+	uint32_t crypt_len;
+	uint8_t *pkt_hmac;
+	int ret;
+
+	crypt_len = skb->len - sizeof(struct ip_esp_hdr) - GCM_RFC4106_IV_SIZE - icv_len;
+
+	/*
+	 * Make data point to IV.
+	 */
+	data = (uint32_t *)(skb->data + sizeof(struct ip_esp_hdr));
+
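+	/*
+	 * Build the 16-byte counter block as laid out in RFC 4106:
+	 * 4-byte salt (nonce) from the SA, the 8-byte explicit IV carried
+	 * in the packet, and a 32-bit block counter starting at 1.
+	 */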
+	iv[0] = dec->nonce;	/* Nonce */
+	iv[1] = *data++;	/* Explicit IV word 0 */
+	iv[2] = *data++;	/* Explicit IV word 1 */
+	iv[3] = htonl(0x1);	/* CTR counter start value */
+
+	/*
+	 * Generate hash for encrypted data.
+	 * do-while used to reduce stack utilization for the ON_STACK variable.
+	 */
+	do {
+		uint8_t zero_pad[GHASH_BLOCK_SIZE] = {0};
+		uint8_t unaligned_len;
+		be128 final_blk;
+		SHASH_DESC_ON_STACK(areq, atfm);
+
+		areq->tfm = atfm;
+		ret = crypto_shash_init(areq);
+		BUG_ON(ret);
+
+		/*
+		 * Authenticate the ESP header.
+		 */
+		crypto_shash_update(areq, skb->data, sizeof(struct ip_esp_hdr));
+
+		/*
+		 * Authenticate, a Fixed 8-byte padding for ESP header.
+		 */
+		crypto_shash_update(areq, zero_pad, 8);
+
+		/*
+		 * Authenticate, payload minus iv.
+		 */
+		crypto_shash_update(areq, (uint8_t *)data, crypt_len);
+
+		/*
+		 * If the payload is unaligned, authenticate additional pad bytes.
+		 */
+		unaligned_len = crypt_len & (GHASH_BLOCK_SIZE - 1);
+		if (unaligned_len) {
+			crypto_shash_update(areq, zero_pad, GHASH_BLOCK_SIZE - unaligned_len);
+		}
+
+		/*
+		 * Final block contains length in bits.
+		 */
+		final_blk.a = cpu_to_be64(sizeof(struct ip_esp_hdr) * 8);
+		final_blk.b = cpu_to_be64(crypt_len * 8);
+		ret = crypto_shash_finup(areq, (uint8_t *)&final_blk, sizeof(final_blk), calc_hmac);
+		BUG_ON(ret);
+	} while (0);
+
+	/*
+	 * Decrypt the data.
+	 * do-while used to reduce stack utilization for the ON_STACK variable.
+	 */
+	do {
+		struct scatterlist sg[2];
+		SYNC_SKCIPHER_REQUEST_ON_STACK(ereq, etfm);
+
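+		/*
+		 * The leading all-zero block consumes the first counter value,
+		 * leaving E(K, J0) in zero_enc; the ciphertext is decrypted
+		 * with the following counter values. zero_enc is XORed with
+		 * the computed GHASH below to reconstruct the expected ICV
+		 * (RFC 4106 AES-GCM).
+		 */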
+		sg_init_table(sg, 2);
+		sg_set_buf(&sg[0], zero_enc, sizeof(zero_enc));
+		sg_set_buf(&sg[1], (uint8_t *)data, crypt_len);
+		skcipher_request_set_sync_tfm(ereq, etfm);
+		skcipher_request_set_callback(ereq, 0, NULL, NULL);
+		skcipher_request_set_crypt(ereq, sg, sg, crypt_len + sizeof(zero_enc), iv);
+
+		ret = crypto_skcipher_decrypt(ereq);
+		BUG_ON(ret);
+	} while (0);
+
+	/*
+	 * Verify received hmac and calculated hmac.
+	 */
+	crypto_xor(calc_hmac, zero_enc, GHASH_BLOCK_SIZE);
+	pkt_hmac = skb_tail_pointer(skb) - icv_len;
+	ret = crypto_memneq(calc_hmac, pkt_hmac, icv_len);
+	if (ret) {
+		pr_debug("%p: Failed to authenticate; err(%d)\n", sa, ret);
+		return -EBADMSG;
+	}
+
+	return 0;
+}
+
+/*
+ * sfe_xfrm_dec_esp4()
+ *	ESP Protocol handler for IPv4 IPsec encapsulated packets.
+ */
+int sfe_xfrm_dec_esp4(struct sk_buff *skb)
+{
+	bool nonlinear = skb_is_nonlinear(skb);
+	struct sfe_xfrm_dev_stats *dev_stats;
+	struct net *net = dev_net(skb->dev);
+	struct sfe_xfrm_dev *dev;
+	struct ip_esp_hdr *esph;
+	struct sfe_xfrm_sa *sa;
+	struct xfrm_state *xs;
+	struct iphdr *iph;
+
+	/*
+	 * Unshare the SKB as we will be modifying it.
+	 */
+	if (unlikely(skb_shared(skb))) {
+		pr_debug("%px: Shared SKB\n", skb);
+		XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
+		goto drop;
+	}
+
+	/*
+	 * Linearize the nonlinear SKB.
+	 * TODO: add support for SG.
+	 */
+	if (nonlinear && __skb_linearize(skb)) {
+		pr_debug("%px: Failed to linearize the SKB\n", skb);
+		XFRM_INC_STATS(net, LINUX_MIB_XFRMINBUFFERERROR);
+		goto drop;
+	}
+
+	/*
+	 * Set header pointers after linearizing, as linearization may reallocate the SKB data.
+	 */
+	iph = ip_hdr(skb);
+	esph = ip_esp_hdr(skb);
+
+	/*
+	 * Lookup xfrm state.
+	 * TODO: Optimize with local database.
+	 * FIXME: Use reference counting to hold objects
+	 */
+	xs = xfrm_state_lookup(net, skb->mark, (xfrm_address_t *)&iph->daddr, esph->spi, IPPROTO_ESP, AF_INET);
+	if (!xs) {
+		pr_debug("IPv4 SA not found %pI4n %x\n", &iph->daddr, esph->spi);
+		XFRM_INC_STATS(net, LINUX_MIB_XFRMINNOSTATES);
+		goto drop;
+	}
+
+	/*
+	 * Only process packets for XFRM state managed by IPsec offload
+	 */
+	if (!(xs->xflags & XFRM_STATE_OFFLOAD_NSS)) {
+		pr_debug("%px: state is not offloaded; xfrm_state %p :drop\n", skb, xs);
+		XFRM_INC_STATS(net, LINUX_MIB_XFRMINSTATEINVALID);
+		goto fail;
+	}
+
+	/*
+	 * SA is stored in private data.
+	 */
+	sa = xs->data;
+	dev = netdev_priv(xs->offload_dev);
+	dev_stats = this_cpu_ptr(dev->stats_pcpu);
+	dev_stats->rx_linearize += nonlinear;
+
+	sfe_xfrm_dec_esp(sa, skb);
+
+	xfrm_state_put(xs);
+	return 0;
+
+fail:
+	xfrm_state_put(xs);
+drop:
+	dev_kfree_skb_any(skb);
+	return 0;
+}
+
+/*
+ * sfe_xfrm_dec_natt()
+ *	Handle UDP encapsulated IPsec packets.
+ */
+int sfe_xfrm_dec_natt(struct sock *sk, struct sk_buff *skb)
+{
+	unsigned int len = skb->len;
+	struct ip_esp_hdr *esph;
+	uint8_t *data;
+
+	/*
+	 * Socket has to be of type UDP_ENCAP_ESPINUDP.
+	 */
+	BUG_ON(udp_sk(sk)->encap_type != UDP_ENCAP_ESPINUDP);
+
+	/*
+	 * Set data pointer to UDP payload.
+	 */
+	data = skb_transport_header(skb) + sizeof(struct udphdr);
+
+	/*
+	 * NAT-keepalive packet has udphdr & one byte payload (rfc3948).
+	 * Consume the packet.
+	 */
+	if (len == 1 && *data == 0xff) {
+		consume_skb(skb);
+		return 0;
+	}
+
+	len -= sizeof(struct udphdr);
+
+	/*
+	 * Check if packet has non-ESP marker (rfc3948).
+	 * Let it pass to user.
+	 */
+	esph = (struct ip_esp_hdr *)data;
+	if ((len < sizeof(*esph)) || !esph->spi) {
+		return 1;
+	}
+
+	/*
+	 * ESP-in-UDP: make the SKB point to the ESP header.
+	 */
+	__skb_pull(skb, sizeof(struct udphdr));
+	skb_reset_transport_header(skb);
+
+	return sfe_xfrm_dec_esp4(skb);
+}
diff --git a/qca-nss-sfe/tunnels/ipsec/sfe_xfrm_enc.c b/qca-nss-sfe/tunnels/ipsec/sfe_xfrm_enc.c
new file mode 100644
index 0000000..d54b590
--- /dev/null
+++ b/qca-nss-sfe/tunnels/ipsec/sfe_xfrm_enc.c
@@ -0,0 +1,424 @@
+/*
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ */
+
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <crypto/aes.h>
+#include <crypto/gcm.h>
+#include <crypto/algapi.h>
+
+#include "sfe_xfrm.h"
+
+static uint16_t ip4_id = 1; /* IPv4 header Identifier */
+
+/*
+ * sfe_xfrm_add_ipv4()
+ *	Add IPv4 header.
+ */
+static inline void sfe_xfrm_add_ipv4(struct sfe_xfrm_sa *sa, struct sk_buff *skb, uint8_t ip_proto)
+{
+	struct sfe_xfrm_sa_hdr *hdr = &sa->hdr;
+	struct iphdr *iph;
+
+	iph = skb_push(skb, sizeof(struct iphdr));
+	skb_reset_network_header(skb);
+	iph->version = IPVERSION;
+	iph->ihl = sizeof(struct iphdr) >> 2;
+	iph->tos = 0;
+	iph->tot_len = htons(skb->len);
+	iph->id = htons(ip4_id++);
+	iph->frag_off = 0;
+	iph->ttl = IPDEFTTL;
+	iph->protocol = ip_proto;
+	iph->saddr = hdr->src_ip[0];
+	iph->daddr = hdr->dst_ip[0];
+	skb->ip_summed = CHECKSUM_NONE;
+	iph->check = 0;
+	iph->check = ip_fast_csum(iph, iph->ihl);
+}
+
+/*
+ * sfe_xfrm_add_udp()
+ *	Add UDP header.
+ */
+static inline void sfe_xfrm_add_udp(struct sfe_xfrm_sa *sa, struct sk_buff *skb)
+{
+	struct sfe_xfrm_sa_hdr *hdr = &sa->hdr;
+	struct udphdr *uh;
+
+	uh = __skb_push(skb, sizeof(struct udphdr));
+	skb_reset_transport_header(skb);
+
+	uh->dest = hdr->dport;
+	uh->source = hdr->sport;
+	uh->len = htons(skb->len);
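+	/*
+	 * A zero UDP checksum is valid for IPv4, and RFC 3948 recommends
+	 * transmitting a zero checksum for UDP-encapsulated ESP.
+	 */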
+	uh->check = 0;
+}
+
+/*
+ * sfe_xfrm_add_esp()
+ *	Add ESP header and trailer. This is specifically optimized for GCM.
+ */
+static inline void sfe_xfrm_add_esp(struct sfe_xfrm_sa *sa, struct sk_buff *skb, uint8_t proto)
+{
+	struct sfe_xfrm_sa_state_enc *enc = &sa->state.enc;
+	struct sfe_xfrm_sa_hdr *hdr = &sa->hdr;
+	struct ip_esp_trailer *trailer;
+	struct ip_esp_hdr *esph;
+	uint16_t pad_len;
+	uint16_t blk_len;
+	uint8_t *pad;
+	uint8_t i;
+
+	/*
+	 * Add ESP header & IV.
+	 * IV will be filled in encrypt_auth()
+	 */
+	esph = (struct ip_esp_hdr *)__skb_push(skb, sizeof(*esph) + enc->iv_len);
+	esph->spi = hdr->spi;
+	esph->seq_no = htonl(enc->esp_seq++);
+
+	/*
+	 * Add the padding.
+	 */
+	blk_len = enc->blk_len;
+	pad_len = ALIGN(skb->len + sizeof(*trailer), blk_len) - (skb->len + sizeof(*trailer));
+
+	/*
+	 * Add ESP trailer and ICV.
+	 * ICV will be filled during encrypt_auth().
+	 */
+	pad = __skb_put(skb, pad_len + sizeof(*trailer) + enc->icv_len);
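+	/*
+	 * Per RFC 4303, the pad bytes carry the values 1, 2, ..., pad_len.
+	 */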
+	for (i = 1; i <= pad_len; i++) {
+		*pad++ = i;
+	}
+
+	trailer = (struct ip_esp_trailer *)pad;
+	trailer->pad_len = pad_len;
+	trailer->next_hdr = proto;
+}
+
+/*
+ * sfe_xfrm_ip4_send()
+ *	Transmit the encapsulated packet.
+ */
+void sfe_xfrm_ip4_send(struct sfe_xfrm_sa *sa, struct sk_buff *skb)
+{
+	struct sfe_xfrm_sa_state_enc *enc = &sa->state.enc;
+	struct sfe_xfrm_dev_stats *dev_stats;
+	struct sfe_xfrm_sa_stats *sa_stats;
+	struct dst_entry *dst;
+	struct rtable *rt;
+
+	dev_stats = this_cpu_ptr(sa->dev->stats_pcpu);
+	sa_stats = this_cpu_ptr(sa->stats_pcpu);
+
+	dst = dst_cache_get(&enc->dst_cache);
+	if (likely(dst)) {
+		goto send_buf;
+	}
+
+	rt = ip_route_output(&init_net, ip_hdr(skb)->daddr, ip_hdr(skb)->saddr, 0, 0);
+	if (IS_ERR(rt)) {
+		sa_stats->fail_route++;
+		dev_stats->tx_fail++;
+		dev_kfree_skb_any(skb);
+		return;
+	}
+
+	dst = &rt->dst;
+	sa_stats->fail_dst_cache++;
+	dst_cache_set_ip4(&enc->dst_cache, dst, ip_hdr(skb)->saddr);
+
+send_buf:
+	/*
+	 * Drop existing dst and set new.
+	 */
+	skb_scrub_packet(skb, false);
+	skb_dst_set(skb, dst);
+
+	/*
+	 * Reset General SKB fields for further processing.
+	 */
+	skb->protocol = htons(ETH_P_IP);
+	skb->skb_iif = sa->ifindex;
+	skb->ip_summed = CHECKSUM_COMPLETE;
+
+	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
+	IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED;
+
+	/*
+	 * Send packet out and update Tx statistics.
+	 */
+	sa_stats->tx_pkts++;
+	sa_stats->tx_bytes += skb->len;
+	dev_stats->tx_pkts++;
+	dev_stats->tx_bytes += skb->len;
+	ip_local_out(&init_net, NULL, skb);
+}
+
+/*
+ * sfe_xfrm_encrypt_auth_gcm()
+ *	Encrypt the SKB. SKB data must be pointing to ESP header & block aligned.
+ */
+void sfe_xfrm_encrypt_auth_gcm(struct sfe_xfrm_sa *sa, struct sk_buff *skb)
+{
+	struct sfe_xfrm_sa_state_enc *enc = &sa->state.enc;
+	struct crypto_sync_skcipher *etfm = sa->enc_tfm;
+	struct crypto_shash *atfm = sa->auth_tfm;
+	uint8_t zero_enc[AES_BLOCK_SIZE] = {0};
+	uint8_t *esph, *data, *pkt_hmac;
+	uint32_t iv[4], *pkt_iv;
+	uint32_t data_len;
+	int ret;
+
+	esph = skb->data + enc->esp_offset;
+	pkt_iv = (uint32_t *)(esph + sizeof(struct ip_esp_hdr));
+	pkt_hmac = skb_tail_pointer(skb) - enc->icv_len;
+
+	/*
+	 * Generate IV for encryption.
+	 */
+	iv[0] = enc->nonce;	/* Nonce */
+	iv[1] = enc->iv_seq[0] ^ enc->salt[0];	/* Explicit IV 0 */
+	iv[2] = enc->iv_seq[1] ^ enc->salt[1];	/* Explicit IV 1 */
+	iv[3] = htonl(0x1);	/* CTR counter start value */
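+
+	/*
+	 * The explicit IV is a 64-bit per-packet counter stored as two
+	 * 32-bit words; advance it as a single 64-bit value.
+	 */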
+	(*(uint64_t *)&enc->iv_seq)++;
+
+	/*
+	 * Copy explicit IV to packet.
+	 */
+	*pkt_iv++ = iv[1];
+	*pkt_iv++ = iv[2];
+
+	/*
+	 * Set cipher data start and length.
+	 */
+	data = (uint8_t *)pkt_iv;
+	data_len = pkt_hmac - data;
+
+	/*
+	 * Encrypt the data.
+	 * do-while used to reduce stack utilization for the ON_STACK variable.
+	 */
+	do {
+		struct scatterlist sg[2];
+
+		/*
+		 * TODO: Allocate on heap
+		 */
+		SYNC_SKCIPHER_REQUEST_ON_STACK(ereq, etfm);
+
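+		/*
+		 * GCM is decomposed here into a counter-mode cipher plus GHASH:
+		 * the leading all-zero block consumes the first counter value,
+		 * leaving E(K, J0) in zero_enc, while the payload is encrypted
+		 * with the following counter values. zero_enc is later XORed
+		 * into the GHASH output to produce the ICV (RFC 4106).
+		 */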
+		sg_init_table(sg, 2);
+		sg_set_buf(&sg[0], zero_enc, sizeof(zero_enc));
+		sg_set_buf(&sg[1], data, data_len);
+
+		skcipher_request_set_sync_tfm(ereq, etfm);
+		skcipher_request_set_callback(ereq, 0, NULL, NULL);
+		skcipher_request_set_crypt(ereq, sg, sg, data_len + sizeof(zero_enc), iv);
+
+		ret = crypto_skcipher_encrypt(ereq);
+		BUG_ON(ret);
+
+	} while (0);
+
+	/*
+	 * Generate hash for encrypted data.
+	 * do-while used to reduce stack utilization for the ON_STACK variable.
+	 */
+	do {
+		uint8_t zero_pad[GHASH_BLOCK_SIZE] = {0};
+		uint8_t unaligned_len;
+		be128 final_blk;
+
+		/*
+		 * TODO: Allocate this on heap
+		 */
+		SHASH_DESC_ON_STACK(areq, atfm);
+
+		areq->tfm = atfm;
+
+		ret = crypto_shash_init(areq);
+		BUG_ON(ret);
+
+		/*
+		 * Authenticate the ESP header
+		 */
+		crypto_shash_update(areq, esph, sizeof(struct ip_esp_hdr));
+
+		/*
+		 * Authenticate, a Fixed 8-byte padding for ESP header.
+		 */
+		crypto_shash_update(areq, zero_pad, 8);
+
+		/*
+		 * Authenticate, payload minus iv
+		 */
+		crypto_shash_update(areq, data, data_len);
+
+		/*
+		 * If the payload is unaligned, authenticate additional pad bytes.
+		 */
+		unaligned_len = data_len & (GHASH_BLOCK_SIZE - 1);
+		if (unaligned_len) {
+			crypto_shash_update(areq, zero_pad, GHASH_BLOCK_SIZE - unaligned_len);
+		}
+
+		/*
+		 * Final block contains length in bits.
+		 * Generate HMAC directly in SKB tail.
+		 * We may be writing more than icv_len but it is OK as we have enough tailroom.
+		 */
+		final_blk.a = cpu_to_be64(sizeof(struct ip_esp_hdr) * 8);
+		final_blk.b = cpu_to_be64(data_len * 8);
+
+		ret = crypto_shash_finup(areq, (uint8_t *)&final_blk, sizeof(final_blk), pkt_hmac);
+		BUG_ON(ret);
+
+		crypto_xor(pkt_hmac, zero_enc, GHASH_BLOCK_SIZE);
+	} while (0);
+}
+
+/*
+ * sfe_xfrm_add_hdr_natt()
+ *	Add IPv4 encapsulation headers for NATT.
+ */
+void sfe_xfrm_add_hdr_natt(struct sfe_xfrm_sa *sa, struct sk_buff *skb)
+{
+	/*
+	 * Insert ESP, UDP & IP header.
+	 */
+	sfe_xfrm_add_esp(sa, skb, IPPROTO_IPIP);
+	sfe_xfrm_add_udp(sa, skb);
+	sfe_xfrm_add_ipv4(sa, skb, IPPROTO_UDP);
+}
+
+/*
+ * sfe_xfrm_add_hdr_v4()
+ *	Add IPv4 encapsulation headers.
+ */
+void sfe_xfrm_add_hdr_v4(struct sfe_xfrm_sa *sa, struct sk_buff *skb)
+{
+	/*
+	 * Insert ESP & IP header.
+	 */
+	sfe_xfrm_add_esp(sa, skb, IPPROTO_IPIP);
+	sfe_xfrm_add_ipv4(sa, skb, IPPROTO_ESP);
+}
+
+/*
+ * sfe_xfrm_add_hdr_ip6()
+ *	Add IPv6 encapsulation header & encrypt.
+ */
+void sfe_xfrm_add_hdr_ip6(struct sfe_xfrm_sa *sa, struct sk_buff *skb)
+{
+	pr_err("%p: Not implemented\n", sa);
+	BUG_ON(1);
+}
+
+/*
+ * sfe_xfrm_enc()
+ *	Encapsulates plaintext packet.
+ */
+netdev_tx_t sfe_xfrm_enc(struct sk_buff *skb, struct net_device *ndev)
+{
+	struct sfe_xfrm_dev *dev = netdev_priv(ndev);
+	bool nonlinear = skb_is_nonlinear(skb);
+	struct sfe_xfrm_dev_stats *dev_stats;
+	struct sfe_xfrm_sa_stats *sa_stats;
+	struct sfe_xfrm_sa_state_enc *enc;
+	struct sfe_xfrm_sa *sa;
+
+	dev_stats = this_cpu_ptr(dev->stats_pcpu);
+
+	/*
+	 * Unshare the SKB as we will be modifying it.
+	 */
+	if (unlikely(skb_shared(skb))) {
+		dev_stats->tx_fail_shared++;
+		goto drop;
+	}
+
+	/*
+	 * Linearize the nonlinear SKB.
+	 * TODO: add support for SG.
+	 */
+	if (nonlinear && __skb_linearize(skb)) {
+		pr_debug("%px: Failed to linearize the SKB\n", ndev);
+		dev_stats->tx_fail_linearize++;
+		goto drop;
+	}
+
+	dev_stats->tx_linearize += nonlinear;
+
+	/*
+	 * First SA in the device encapsulation head is always selected.
+	 */
+	rcu_read_lock_bh();
+
+	sa = rcu_dereference(dev->sa);
+	if (unlikely(!sa)) {
+		pr_debug("%px: Failed to find a valid SA for encapsulation\n", ndev);
+		dev_stats->tx_fail_sa++;
+		goto fail;
+	}
+
+	/*
+	 * Packets with insufficient headroom or tailroom will be dropped.
+	 */
+	enc = &sa->state.enc;
+	if ((skb_headroom(skb) < enc->head_room) || (skb_tailroom(skb) < enc->tail_room)) {
+		pr_debug("%px: dropping SKB(%p): hroom(%u) or troom(%u)\n", ndev, skb, skb_headroom(skb), skb_tailroom(skb));
+		dev_stats->tx_fail_hroom += (skb_headroom(skb) < enc->head_room);
+		dev_stats->tx_fail_troom += (skb_tailroom(skb) < enc->tail_room);
+		goto fail;
+	}
+
+	sa_stats = this_cpu_ptr(sa->stats_pcpu);
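+
+	/*
+	 * esp_seq wrapping to zero means the sequence space is exhausted;
+	 * ESP sequence numbers must not be reused, so drop the packet.
+	 */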
+	if (unlikely(!enc->esp_seq)) {
+		sa_stats->fail_seq++;
+		pr_debug("%px: ESP Sequence overflowed SPI(0x%X)", skb, htonl(sa->hdr.spi));
+		goto fail;
+	}
+
+	/*
+	 * Update Rx statistics.
+	 */
+	sa_stats->rx_pkts++;
+	sa_stats->rx_bytes += skb->len;
+
+	/*
+	 * The following operations are performed:
+	 * 1. Add ESP header to the packet
+	 * 2. Encrypt payload and authenticate
+	 * 3. Add IP headers & transmit
+	 */
+	enc->add_hdr(sa, skb);
+	enc->encrypt_auth(sa, skb);
+	enc->ip_send(sa, skb);
+
+	rcu_read_unlock_bh();
+	return NETDEV_TX_OK;
+
+fail:
+	rcu_read_unlock_bh();
+drop:
+	dev_kfree_skb_any(skb);
+	dev_stats->tx_fail++;
+	return NETDEV_TX_OK;
+}
diff --git a/qca-ssdk/src/hsl/phy/qca808x.c b/qca-ssdk/src/hsl/phy/qca808x.c
index 4a6301a..ee16b6f 100755
--- a/qca-ssdk/src/hsl/phy/qca808x.c
+++ b/qca-ssdk/src/hsl/phy/qca808x.c
@@ -1,6 +1,7 @@
 /*
  * Copyright (c) 2018-2019, 2021, The Linux Foundation. All rights reserved.
- * Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
+ * Copyright (c) 2022-2023 Qualcomm Innovation Center, Inc. All rights reserved.
+ *
  * Permission to use, copy, modify, and/or distribute this software for
  * any purpose with or without fee is hereby granted, provided that the
  * above copyright notice and this permission notice appear in all copies.
@@ -340,7 +341,7 @@
 
 static int qca808x_config_aneg(struct phy_device *phydev)
 {
-	a_uint32_t advertise = 0;
+	a_uint32_t advertise = 0, advertise_old = 0;
 	a_uint16_t phy_data = 0;
 	int err = 0;
 	a_uint32_t dev_id = 0, phy_id = 0;
@@ -380,8 +381,13 @@
 		if(!(advertise & ~(FAL_PHY_ADV_PAUSE | FAL_PHY_ADV_ASY_PAUSE))) {
 			return SW_BAD_VALUE;
 		}
-		err |= qca808x_phy_set_autoneg_adv(dev_id, phy_id, advertise);
-		err |= qca808x_phy_restart_autoneg(dev_id, phy_id);
+		err |= qca808x_phy_get_autoneg_adv(dev_id, phy_id, &advertise_old);
+
+		SSDK_DEBUG("advertise: 0x%x, advertise_old: 0x%x\n", advertise, advertise_old);
+		if(advertise != advertise_old) {
+			err |= qca808x_phy_set_autoneg_adv(dev_id, phy_id, advertise);
+			err |= qca808x_phy_restart_autoneg(dev_id, phy_id);
+		}
 	}
 
 	return err;
diff --git a/qca-ssdk/src/hsl/phy/qca808x_phy.c b/qca-ssdk/src/hsl/phy/qca808x_phy.c
index 43bb27a..bc109e6 100755
--- a/qca-ssdk/src/hsl/phy/qca808x_phy.c
+++ b/qca-ssdk/src/hsl/phy/qca808x_phy.c
@@ -630,8 +630,10 @@
 	rv = qca808x_phy_reg_write(dev_id, phy_id, QCA808X_PHY_CONTROL,
 			     phy_data | QCA808X_CTRL_SOFTWARE_RESET);
 	SW_RTN_ON_ERROR(rv);
+
 	/*the configure will lost when reset.*/
-	rv = qca808x_phy_ms_seed_enable(dev_id, phy_id, A_TRUE);
+	if (qca808x_phy_2500caps(dev_id, phy_id) == A_TRUE)
+		rv = qca808x_phy_ms_seed_enable(dev_id, phy_id, A_TRUE);
 
 	return rv;
 }