Project import generated by Copybara.

GitOrigin-RevId: e777113223835d6173b990cf878a6b4b2eef1fb2
diff --git a/build_scripts/build_all.sh b/build_scripts/build_all.sh
new file mode 100755
index 0000000..111d8c1
--- /dev/null
+++ b/build_scripts/build_all.sh
@@ -0,0 +1,157 @@
+#!/bin/bash
+
+set -e
+
+top_dir=$(readlink -e $(dirname $0)/../../)
+
+declare -A PRODUCT_LIST=([sirocco]="sirocco-p1 sirocco-b1 sirocco-b3 sirocco-b4")
+
+function Help() {
+  echo "Usage: $0 <eureka_workspace> <product/board> [optional build number] [other options]"
+  echo "  valid product : sirocco"
+  echo "  available options: --venv_root=<venv_root>"
+}
+
+function parse_args() {
+  readonly eureka_src_path="$( cd "$( dirname "${1}" )" && pwd )/$(basename $1)"
+  # Set the target profile as per SOC
+  product=""
+  revision=""
+  # Check whether the user specified a product or a board revision
+  if [[ "$2" =~ ^.+-[pb][0-9]$ ]]; then
+    product=`echo $2 | cut -d "-" -f1`
+    revision=`echo $2 | cut -d "-" -f2`
+  else
+    product=$2
+  fi
+  readonly product
+  readonly revision
+
+  deterministic_build=""
+
+  shift # eureka_src_path
+  shift # product
+
+  for arg in "$@"
+  do
+    case $arg in
+      --venv_root=*)
+        pyvenv_path="${arg#*=}"
+        shift # past argument=value
+      ;;
+      *[!0-9]* | '') # not a number
+        echo "unknown option ${arg}"
+      ;;
+      *)
+        build_number=$arg
+        shift
+      ;;
+    esac
+  done
+
+  if [ -z "$build_number" ]; then
+    build_number="${USER}-private_build"
+  else
+    # If we were supplied a build number, this is probably a build-server
+    # build, so build the kernel deterministically.
+    deterministic_build="-d"
+  fi
+}
+
+function setup_pyvenv() {
+  if [ -d "${pyvenv_path}" -a -f "${pyvenv_path}/bin/activate" ]; then
+    source ${pyvenv_path}/bin/activate
+    echo "activated virtual environment at ${pyvenv_path}"
+  fi
+}
+
+function IsProductValid() {
+  local available_product
+  local ret=1
+
+  for available_product in ${!PRODUCT_LIST[@]}; do
+    if [ "${available_product}" == "$1" ]; then
+      ret=0
+      break
+    fi
+  done
+
+  return ${ret}
+}
+
+function build_for_board() {
+  board=$1
+
+  # Build kernel
+  pushd ${top_dir}/kernel
+  echo "Build kernel for ${board}..."
+  ./build.sh ${deterministic_build} ${board} ${eureka_src_path}
+  popd
+
+  # Build u-boot
+  pushd ${top_dir}/bootloader
+  echo "Build bootloader for ${board}..."
+  ./build.sh ${deterministic_build} ${board} ${eureka_src_path}
+  popd
+}
+
+function BuildAll() {
+  if [ -z "${eureka_src_path}" ]; then
+    echo "eureka_src_path doesn't exist"
+    exit 1
+  fi
+
+  if ! IsProductValid ${product}; then
+    echo "Invalid product: $product"
+    Help
+    exit 1
+  fi
+
+
+  if [ -n "${revision}" ]; then
+    echo "Build for board ${product}-${revision}"
+    build_for_board $product-$revision
+  else
+    echo "Build for all board revisions for product: ${product}"
+    for board in ${PRODUCT_LIST[${product}]}; do
+      build_for_board $board
+    done
+  fi
+
+  # Build sdk modules
+  pushd ${top_dir}/sdk/qca-ssdk
+  ./build.sh ${product} ${eureka_src_path}
+  popd
+
+  pushd ${top_dir}/sdk/qca-nss-dp
+  ./build.sh ${product} ${eureka_src_path}
+  popd
+
+  pushd ${top_dir}/sdk/qca-nss-sfe
+  ./build.sh ${product} ${eureka_src_path}
+  popd
+
+  pushd ${top_dir}/sdk/qca-nss-ecm
+  ./build.sh ${product} ${eureka_src_path}
+  popd
+}
+
+#########################
+####### Main Entry ######
+#########################
+if [[ "$#" -lt 2 ]] || [[ "$#" -gt 4 ]]; then
+  echo "Error: Incorrect arguments."
+  Help
+  exit 1
+fi
+
+parse_args "$@"
+
+# Create SDK Version number
+mkdir -p ${eureka_src_path}/vendor/qualcomm/${product}
+sdk_version_manifest=${eureka_src_path}/vendor/qualcomm/${product}/SDK_VERSION
+echo "Build #${build_number}|$(TZ='America/Los_Angeles' date)" > $sdk_version_manifest
+
+setup_pyvenv
+
+BuildAll
diff --git a/build_scripts/release_oss.sh b/build_scripts/release_oss.sh
new file mode 100755
index 0000000..732d06e
--- /dev/null
+++ b/build_scripts/release_oss.sh
@@ -0,0 +1,60 @@
+#!/bin/bash
+# Script for releasing SDK open source software on eureka-partner.
+# Usage:
+#   release_oss.sh <oss_target_directory>
+#
+# Output is chromecast_sdk_oss.tgz in <oss_target_directory>.
+
+# Release all source code under ./kernel.
+function release_kernel() {
+  src=$1/kernel
+  dst=$2/kernel
+  echo "Copying kernel from $src ==> $dst..."
+
+  mkdir -p ${dst}
+  # Clean up source tree.
+  pushd ${src} && git clean -dfx && popd
+
+  rsync -av ${src}/ ${dst} --exclude .git
+}
+
+# Release source code under ./bootloader
+function release_bootloader() {
+  src=$1/bootloader
+  dst=$2/u-boot
+  echo "Copying bootloader from $src ==> $dst..."
+
+  mkdir -p ${dst}
+  # Clean up source tree.
+  pushd ${src} && git clean -dfx && popd
+
+  rsync -av ${src}/ ${dst} --exclude .git
+}
+
+sdk_root=$(readlink -f $(dirname $0)/../../)
+
+if [[ $# != 1 ]]; then
+  echo "Usage: $0 OSS_RELEASE_DIR"
+  exit 1
+fi
+
+oss_root=$(readlink -f $1)
+oss_tmp_dir=${oss_root}/chromecast_sdk_oss
+
+if [[ ${oss_root} == ${sdk_root}/* ]]; then
+  echo "OSS_RELEASE_DIR can't be sub-dir of sdk_root $sdk_root"
+  exit 1
+fi
+
+# Clear and create output dir.
+rm -rf ${oss_tmp_dir}
+mkdir -p ${oss_tmp_dir}
+
+release_kernel ${sdk_root} $oss_tmp_dir
+
+release_bootloader ${sdk_root} ${oss_tmp_dir}
+
+tar zcvf ${oss_root}/chromecast_sdk_oss.tgz -C ${oss_root} chromecast_sdk_oss
+rm -rf ${oss_tmp_dir}
+
+echo "Open Source Software has been released to ${oss_root}/chromecast_sdk_oss.tgz."
diff --git a/build_scripts/setup_env.sh b/build_scripts/setup_env.sh
new file mode 100644
index 0000000..a658366
--- /dev/null
+++ b/build_scripts/setup_env.sh
@@ -0,0 +1,62 @@
+#!/bin/bash
+# Usage:
+#   source setup_env.sh
+#
+# Inputs:
+#   TOP_DIR: where the .repo of prebuilts reside
+#            Optional. Defaults to this script's directory/../..
+#   ENABLE_64BIT_BUILD: if we are building 64bit prebuilts
+#            Optional. Defaults to false (32-bit build).
+# Exports:
+#   used for kernel or kernel module configs
+#
+#   CROSS_COMPILER_DIR: where cross compilers are
+#   ARCH: arm or arm64
+#   CROSS_COMPILE: the prefix of cross compiler
+#   LD: the linker to use
+#   PATH: new PATH includes the cross compiler
+#   CROSS_MAKE: make cmd with essential parameters to do a cross make
+#
+# Functions
+#   GetModulePath:
+#     Take <eureka_src_path> and <product> to generate target
+#     kernel module path
+
+if [ ! -d "${TOP_DIR}" ]; then
+  TOP_DIR="$(readlink -e $(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)/../..)"
+fi
+ENABLE_64BIT_BUILD=${ENABLE_64BIT_BUILD:-"false"}
+
+_toolchain_dir=$(readlink -e ${TOP_DIR}/prebuilt/toolchain)
+_num_jobs=$(grep -c processor /proc/cpuinfo)
+
+if [ "${ENABLE_64BIT_BUILD}" = "false" ]; then
+  export CROSS_COMPILER_DIR=${_toolchain_dir}/armv7a/bin
+  export ARCH=arm
+  export CROSS_COMPILE=armv7a-cros-linux-gnueabihf-
+  export LD=${CROSS_COMPILE}ld
+  export CC=${CROSS_COMPILE}gcc
+else
+  export CROSS_COMPILER_DIR=${_toolchain_dir}/aarch64/bin
+  export ARCH=arm64
+  export CROSS_COMPILE=aarch64-cros-linux-gnu-
+  export LD=${CROSS_COMPILE}ld.bfd
+  export CC=${CROSS_COMPILE}clang
+fi
+
+function GetModulePath() {
+  local eureka_src_path=$1
+  local product=$2
+
+  echo "${eureka_src_path}/vendor/qualcomm/${product}/prebuilt/systemfs_overlay/lib/modules"
+}
+
+function GetKernelPath() {
+  local eureka_src_path=$1
+  local product=$2
+
+  echo ${eureka_src_path}/vendor/qualcomm/${product}/prebuilt/kernel
+}
+
+export PATH=${CROSS_COMPILER_DIR}:$PATH
+export CROSS_MAKE="make -j${_num_jobs} CC=${CC} LD=${LD}"
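+
+# Example usage (a sketch; the kernel path, module directory and product
+# name are illustrative):
+#   source build_scripts/setup_env.sh
+#   ${CROSS_MAKE} -C <kernel_path> M=<module_src_dir> modules
+#   module_dir="$(GetModulePath <eureka_src_path> sirocco)"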
diff --git a/ecm_wifi_plugins/Makefile b/ecm_wifi_plugins/Makefile
new file mode 100644
index 0000000..0a379b2
--- /dev/null
+++ b/ecm_wifi_plugins/Makefile
@@ -0,0 +1,11 @@
+# Makefile for ecm wifi plugin module
+ccflags-y += -I$(obj)/../exports -I$(obj)/..
+ccflags-y += -DECM_WIFI_PLUGIN_BUILD_ID="$(BUILD_ID)"
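+# Debug level 2 compiles in warnings only; see ecm_wifi_plugin.h for the levels.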
+ccflags-y += -DECM_WIFI_PLUGIN_DEBUG_LEVEL=2
+ccflags-y += -Wall -Werror
+obj-m += ecm-wifi-plugin.o
+ecm-wifi-plugin-objs := \
+                        ecm_wifi_plugin_emesh.o \
+                        ecm_wifi_plugin_mscs.o \
+                        ecm_wifi_plugin_init.o
+
diff --git a/ecm_wifi_plugins/ecm_wifi_plugin.h b/ecm_wifi_plugins/ecm_wifi_plugin.h
new file mode 100644
index 0000000..2328df1
--- /dev/null
+++ b/ecm_wifi_plugins/ecm_wifi_plugin.h
@@ -0,0 +1,85 @@
+/*
+ ***************************************************************************
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ ***************************************************************************
+ */
+
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+
+#include <qca_mscs_if.h>
+#include <qca_mesh_latency_if.h>
+#include <ecm_classifier_mscs_public.h>
+#include <ecm_classifier_emesh_public.h>
+
+#if defined(CONFIG_DYNAMIC_DEBUG)
+
+/*
+ * Compile messages for dynamic enable/disable
+ */
+#define ecm_wifi_plugin_warning(s, ...) pr_debug("%s[%d]:" s, __func__, __LINE__, ##__VA_ARGS__)
+#define ecm_wifi_plugin_info(s, ...) pr_debug("%s[%d]:" s, __func__, __LINE__, ##__VA_ARGS__)
+#define ecm_wifi_plugin_trace(s, ...) pr_debug("%s[%d]:" s, __func__, __LINE__, ##__VA_ARGS__)
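+
+/*
+ * With CONFIG_DYNAMIC_DEBUG, these messages can be enabled at runtime,
+ * e.g. (a sketch, assuming debugfs is mounted at /sys/kernel/debug):
+ *   echo 'module ecm_wifi_plugin +p' > /sys/kernel/debug/dynamic_debug/control
+ */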
+#else
+
+/*
+ * Statically compile messages at different levels
+ */
+#if (ECM_WIFI_PLUGIN_DEBUG_LEVEL < 2)
+#define ecm_wifi_plugin_warning(s, ...)
+#else
+#define ecm_wifi_plugin_warning(s, ...) pr_warn("%s[%d]:" s, __func__, __LINE__, ##__VA_ARGS__)
+#endif
+
+#if (ECM_WIFI_PLUGIN_DEBUG_LEVEL < 3)
+#define ecm_wifi_plugin_info(s, ...)
+#else
+#define ecm_wifi_plugin_info(s, ...)   pr_notice("%s[%d]:" s, __func__, __LINE__, ##__VA_ARGS__)
+#endif
+
+#if (ECM_WIFI_PLUGIN_DEBUG_LEVEL < 4)
+#define ecm_wifi_plugin_trace(s, ...)
+#else
+#define ecm_wifi_plugin_trace(s, ...)  pr_info("%s[%d]:" s, __func__, __LINE__, ##__VA_ARGS__)
+#endif
+#endif
+
+/*
+ * ecm_wifi_plugin_mscs_register()
+ *	API to register mscs callbacks.
+ */
+extern int ecm_wifi_plugin_mscs_register(void);
+
+/*
+ * ecm_wifi_plugin_emesh_register()
+ *	API to register emesh callbacks.
+ */
+extern int ecm_wifi_plugin_emesh_register(void);
+
+/*
+ * ecm_wifi_plugin_mscs_unregister()
+ *	API to unregister the mscs callbacks.
+ */
+extern void ecm_wifi_plugin_mscs_unregister(void);
+
+/*
+ * ecm_wifi_plugin_emesh_unregister()
+ *	API to unregister the emesh callbacks.
+ */
+extern void ecm_wifi_plugin_emesh_unregister(void);
diff --git a/ecm_wifi_plugins/ecm_wifi_plugin_emesh.c b/ecm_wifi_plugins/ecm_wifi_plugin_emesh.c
new file mode 100644
index 0000000..5fa3b3e
--- /dev/null
+++ b/ecm_wifi_plugins/ecm_wifi_plugin_emesh.c
@@ -0,0 +1,59 @@
+/*
+ **************************************************************************
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ **************************************************************************
+ */
+
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+
+#include "ecm_wifi_plugin.h"
+
+/*
+ * ecm_wifi_plugin_emesh
+ * 	Register EMESH client callback with the ECM EMESH classifier to update peer mesh latency parameters.
+ */
+static struct ecm_classifier_emesh_callbacks ecm_wifi_plugin_emesh = {
+	.update_peer_mesh_latency_params = qca_mesh_latency_update_peer_parameter,
+};
+
+/*
+ * ecm_wifi_plugin_emesh_register()
+ *	Register emesh callbacks.
+ */
+int ecm_wifi_plugin_emesh_register(void)
+{
+	if (ecm_classifier_emesh_latency_config_callback_register(&ecm_wifi_plugin_emesh)) {
+		ecm_wifi_plugin_warning("ecm emesh classifier callback registration failed.\n");
+		return -1;
+	}
+
+	ecm_wifi_plugin_info("EMESH classifier callbacks registered\n");
+	return 0;
+}
+
+/*
+ * ecm_wifi_plugin_emesh_unregister()
+ *	unregister the emesh callbacks.
+ */
+void ecm_wifi_plugin_emesh_unregister(void)
+{
+	ecm_classifier_emesh_latency_config_callback_unregister();
+	ecm_wifi_plugin_info("EMESH classifier callbacks unregistered\n");
+}
diff --git a/ecm_wifi_plugins/ecm_wifi_plugin_init.c b/ecm_wifi_plugins/ecm_wifi_plugin_init.c
new file mode 100644
index 0000000..af82045
--- /dev/null
+++ b/ecm_wifi_plugins/ecm_wifi_plugin_init.c
@@ -0,0 +1,67 @@
+/*
+ **************************************************************************
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ **************************************************************************
+ */
+
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+
+#include "ecm_wifi_plugin.h"
+
+/*
+ * ecm_wifi_plugin_init_module()
+ *	ECM_WIFI_PLUGIN module init function
+ */
+int __init ecm_wifi_plugin_init_module(void)
+{
+	int ret = 0;
+
+	ret = ecm_wifi_plugin_mscs_register();
+	if (ret) {
+		ecm_wifi_plugin_warning("MSCS callback registration failed\n");
+		return ret;
+	}
+
+	ret = ecm_wifi_plugin_emesh_register();
+	if (ret) {
+		ecm_wifi_plugin_warning("EMESH callback registration failed\n");
+		return ret;
+	}
+
+	ecm_wifi_plugin_info("ECM_WIFI_PLUGIN module loaded\n");
+	return 0;
+}
+
+/*
+ * ecm_wifi_plugin_exit_module()
+ *	ECM_WIFI_PLUGIN module exit function
+ */
+static void __exit ecm_wifi_plugin_exit_module(void)
+{
+	ecm_wifi_plugin_mscs_unregister();
+	ecm_wifi_plugin_emesh_unregister();
+	ecm_wifi_plugin_info("ECM_WIFI_PLUGIN unloaded\n");
+}
+
+module_init(ecm_wifi_plugin_init_module);
+module_exit(ecm_wifi_plugin_exit_module);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("ECM_WIFI_PLUGIN module");
diff --git a/ecm_wifi_plugins/ecm_wifi_plugin_mscs.c b/ecm_wifi_plugins/ecm_wifi_plugin_mscs.c
new file mode 100644
index 0000000..b781108
--- /dev/null
+++ b/ecm_wifi_plugins/ecm_wifi_plugin_mscs.c
@@ -0,0 +1,59 @@
+/*
+ **************************************************************************
+ * Copyright (c) 2020-2021, The Linux Foundation. All rights reserved.
+ * Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
+ *
+ * Permission to use, copy, modify, and/or distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
+ * OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ **************************************************************************
+ */
+
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/of.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+
+#include "ecm_wifi_plugin.h"
+
+/*
+ * ecm_wifi_plugin
+ * 	Register MSCS client callback with ECM MSCS classifier to support MSCS wifi peer lookup.
+ */
+static struct ecm_classifier_mscs_callbacks ecm_wifi_plugin_mscs = {
+	.get_peer_priority = qca_mscs_peer_lookup_n_get_priority,
+};
+
+/*
+ * ecm_wifi_plugin_mscs_register()
+ *	register mscs callbacks.
+ */
+int ecm_wifi_plugin_mscs_register(void)
+{
+	if (ecm_classifier_mscs_callback_register(&ecm_wifi_plugin_mscs)) {
+		ecm_wifi_plugin_warning("ecm mscs classifier callback registration failed.\n");
+		return -1;
+	}
+
+	ecm_wifi_plugin_info("MSCS callbacks registered\n");
+	return 0;
+}
+
+/*
+ * ecm_wifi_plugin_mscs_unregister()
+ *	unregister the mscs callbacks.
+ */
+void ecm_wifi_plugin_mscs_unregister(void)
+{
+	ecm_classifier_mscs_callback_unregister();
+	ecm_wifi_plugin_info("MSCS callbacks unregistered\n");
+}
diff --git a/nat46/.gitignore b/nat46/.gitignore
new file mode 100644
index 0000000..bd9c9a7
--- /dev/null
+++ b/nat46/.gitignore
@@ -0,0 +1,15 @@
+
+# Ignore general build intermediate files
+*.o
+*.o.d
+*.cmd
+*.ko
+*.mod.*
+*.mod
+Module.symvers
+modules.order
+.tmp_versions
+built-in.a
+
+# don't ignore .gitignore itself
+!.gitignore
diff --git a/nat46/README.md b/nat46/README.md
new file mode 100644
index 0000000..ddebbdd
--- /dev/null
+++ b/nat46/README.md
@@ -0,0 +1,32 @@
+nat46
+=====
+
+This is an OpenWrt feed with a Linux kernel module implementing flexible NAT46.
+
+Compiling
+=========
+
+With Barrier Breaker (trunk), add the following line to *feeds.conf.default*:
+```
+src-git nat46 https://github.com/ayourtch/nat46.git
+```
+
+then issue:
+
+```
+./scripts/feeds update -a
+./scripts/feeds install -a -p nat46
+```
+
+This will cause the following to appear in the "make menuconfig":
+
+ * Kernel modules -> Network Support -> kmod-nat46
+
+Managing
+========
+
+The management of the NAT46 interfaces is done via the /proc/net/nat46/control file.
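+
+For instance (a minimal sketch; the interface name is illustrative):
+
+```
+echo add nat46dev | sudo tee /proc/net/nat46/control
+echo config nat46dev debug 1 | sudo tee /proc/net/nat46/control
+echo del nat46dev | sudo tee /proc/net/nat46/control
+```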
+
+For more information about the module, take a look at the nat46/modules/README file.
+
+
diff --git a/nat46/build.sh b/nat46/build.sh
new file mode 100755
index 0000000..9fccc30
--- /dev/null
+++ b/nat46/build.sh
@@ -0,0 +1,101 @@
+#!/bin/bash
+
+set -e
+set -o errtrace
+trap 'echo Fatal error: script $0 aborting at line $LINENO, command \"$BASH_COMMAND\" returned $?; exit 1' ERR
+
+PRODUCT_LIST="sirocco"
+
+NUM_JOBS=$(grep -c processor /proc/cpuinfo)
+
+MODULE_NAME="nat46"
+###################################################
+# Set up build toolchain
+###################################################
+sdk_top_dir=$(readlink -e $(dirname $0)/..)
+TOP_DIR=$(readlink -e ${sdk_top_dir}/..)
+
+#export ARCH and build related envs.
+source ${TOP_DIR}/sdk/build_scripts/setup_env.sh
+
+kernel_path=$(readlink -e ${sdk_top_dir}/../kernel)
+soc_type=ipq50xx
+nat46_version="1.0"
+extra_cflags=-D"NAT46_VERSION='\"${nat46_version}\"'"
+if [ "${ENABLE_64BIT_BUILD}" == "false" ]; then
+  extra_cflags="${extra_cflags} -mlong-calls"
+fi
+
+##################################################
+# Build Kernel Module
+##################################################
+function BuildKModule() {
+    # make kernel module
+    echo "Build ${MODULE_NAME}"
+    ${CROSS_MAKE} -C ${kernel_path} M=${sdk_top_dir}/${MODULE_NAME}/${MODULE_NAME}/modules SoC=${soc_type} EXTRA_CFLAGS="${extra_cflags}" V=1
+}
+
+##################################################
+# Build & Install
+##################################################
+function BuildAndInstall() {
+    local product=$1
+    local eureka_src_path=$(readlink -e $2)
+    if [ -z ${eureka_src_path} ]; then
+      echo "eureka_src_path doesn't exist"
+      exit 1
+    fi
+
+    # clean previous build
+    find . -name "*.o" -delete
+    find . -name "*.cmd" -delete
+    find . -name "*.ko" -delete
+    find . -name "*.mod.*" -delete
+    find . -name "*.mod" -delete
+    find . -name Module.symvers -delete
+
+    # build module
+    BuildKModule
+
+    # install module
+    echo "Install ${MODULE_NAME}.ko"
+    local module_target_dir="$(GetModulePath ${eureka_src_path} ${product})"
+    mkdir -p ${module_target_dir}
+    cp -f ${MODULE_NAME}/modules/${MODULE_NAME}.ko ${module_target_dir}/.
+}
+
+function Usage() {
+    cat << EOF
+Usage:
+    $0 <product> <eureka_src_path>
+    Valid products: ${PRODUCT_LIST}
+EOF
+}
+
+function IsProductValid() {
+  local product
+  local ret=1
+
+  for product in ${PRODUCT_LIST}; do
+    if [ "${product}" == "$1" ]; then
+      ret=0
+      break
+    fi
+  done
+
+  return ${ret}
+}
+
+#########################
+####### Main Entry ######
+#########################
+if (( $# < 2 )); then
+  Usage
+else
+  if IsProductValid $1; then
+    BuildAndInstall $1 $2
+  else
+    echo "$1 is an invalid product"
+    Usage
+  fi
+fi
diff --git a/nat46/nat46/Makefile b/nat46/nat46/Makefile
new file mode 100644
index 0000000..daedbac
--- /dev/null
+++ b/nat46/nat46/Makefile
@@ -0,0 +1,53 @@
+#
+# Copyright (C) 2006-2012 OpenWrt.org
+#
+# This is free software, licensed under the GNU General Public License v2.
+# See /LICENSE for more information.
+#
+
+include $(TOPDIR)/rules.mk
+include $(INCLUDE_DIR)/kernel.mk
+
+PKG_NAME:=nat46
+PKG_VERSION:=0000000
+
+include $(INCLUDE_DIR)/package.mk
+
+define KernelPackage/nat46
+  DEPENDS:=+kmod-ipv6
+  TITLE:=Stateless NAT46 translation kernel module
+  SECTION:=kernel
+  SUBMENU:=Network Support
+  FILES:=$(PKG_BUILD_DIR)/modules/nat46.ko 
+endef
+
+
+define Build/Prepare
+	$(call Build/Prepare/Default)
+	$(CP) -r ./* $(PKG_BUILD_DIR)/
+endef
+
+
+MAKE_KMOD := $(MAKE) -C "$(LINUX_DIR)" \
+		CROSS_COMPILE="$(TARGET_CROSS)" \
+		ARCH="$(LINUX_KARCH)" \
+		PATH="$(TARGET_PATH)" \
+		SUBDIRS="$(PKG_BUILD_DIR)/kmod"
+
+define Build/Compile
+	# Compile the kernel part
+	$(MAKE_KMOD) \
+		SUBDIRS="$(PKG_BUILD_DIR)/modules" \
+		MODFLAGS="-DMODULE -mlong-calls" \
+		modules
+	pwd
+endef
+
+define KernelPackage/nat46/install
+	$(CP) -r ./files/* $(1)/
+endef
+
+$(eval $(call KernelPackage,nat46))
+
+
+
diff --git a/nat46/nat46/README.md b/nat46/nat46/README.md
new file mode 100644
index 0000000..71c6547
--- /dev/null
+++ b/nat46/nat46/README.md
@@ -0,0 +1,2 @@
+See README in modules/
+
diff --git a/nat46/nat46/files/etc/modules.d/33-nat46 b/nat46/nat46/files/etc/modules.d/33-nat46
new file mode 100644
index 0000000..89285cb
--- /dev/null
+++ b/nat46/nat46/files/etc/modules.d/33-nat46
@@ -0,0 +1,2 @@
+nat46
+
diff --git a/nat46/nat46/modules/Makefile b/nat46/nat46/modules/Makefile
new file mode 100644
index 0000000..9380882
--- /dev/null
+++ b/nat46/nat46/modules/Makefile
@@ -0,0 +1,9 @@
+obj-m += nat46.o
+nat46-objs := nat46-netdev.o nat46-module.o nat46-core.o nat46-glue.o
+CFLAGS_nat46.o := -DDEBUG
+
+all:
+	make -C /lib/modules/$(shell uname -r)/build M=$(PWD) modules
+
+clean:
+	make -C /lib/modules/$(shell uname -r)/build M=$(PWD) clean
diff --git a/nat46/nat46/modules/README b/nat46/nat46/modules/README
new file mode 100644
index 0000000..b2c0bd3
--- /dev/null
+++ b/nat46/nat46/modules/README
@@ -0,0 +1,155 @@
+ABOUT AND USAGE
+---------------
+
+This is a generic stateless NAT46 kernel module for Linux.
+
+It supports multiple simultaneous instances of NAT46 on the same host,
+allowing sophisticated translation strategies to be implemented simply
+by using routing to direct packets to the appropriate interface.
+
+Upon loading, it creates a file /proc/net/nat46/control, which is used 
+to interact with it.
+
+echo add <ifname> | sudo tee /proc/net/nat46/control
+	create a new nat46 interface with a specified name
+
+echo del <ifname> | sudo tee /proc/net/nat46/control
+	delete the existing nat46 interface with a specified name
+
+echo config <ifname> <cfg-strings> | sudo tee /proc/net/nat46/control
+	pass the <cfg-strings> data to the configuration routine of
+	the respective nat46 interface. If multiple rules are present,
+	this command controls the last one in the ruleset.
+
+echo insert <ifname> <cfg-strings> | sudo tee /proc/net/nat46/control
+	insert a new rule with the specified config string at the head
+	of the rule set for the device.
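+
+A typical session might look like this (a sketch; the interface name,
+addresses and prefixes are illustrative):
+
+	echo add nat46dev | sudo tee /proc/net/nat46/control
+	echo config nat46dev local.style NONE local.v4 198.51.100.1/32 local.v6 2001:db8::1/128 remote.style RFC6052 remote.v4 0.0.0.0/0 remote.v6 64:ff9b::/96 | sudo tee /proc/net/nat46/control
+	ip link set nat46dev up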
+
+CONFIGURING NAT46 DEVICE
+------------------------
+
+Configuration parameters for the device take the form of "name value" pairs,
+with the following values:
+
+debug <level>
+	set the debug level on the device to <level>
+
+local.<param> <value>
+	set the local side translation rule's parameter <param> to <value>
+
+remote.<param> <value>
+	set the remote side translation rule's parameter <param> to <value>
+
+
+For greater flexibility, the rules for local and remote addresses use the
+same translation mechanism and accept several arguments. The most important
+argument is "style", which determines what kind of translation mechanism is
+employed for this rule:
+
+<rule>.style NONE
+	this is a very simplistic style: it always fails, unless you configure 
+	a /32 IPv4 prefix and a /128 IPv6 prefix - at which point it starts to 
+	function as a single 1:1 translation rule.
+
+	<rule>.v4 <v4addr>/32
+	<rule>.v6 <v6addr>/128
+		both of these parameters must be set for this translation style
+		to function properly. They define the two addresses for 
+		the 1:1 mapping.
+
+	<rule>.ea-len
+	<rule>.psid-offset
+	<rule>.fmr-flag
+		ignored in this translation style
+
+	NB: in the future this translation mechanism may be extended to allow 1:1
+	subnet mapping.
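+
+	For example, a single 1:1 mapping might be configured with
+	(illustrative addresses):
+
+		local.style NONE
+		local.v4 192.0.2.1/32
+		local.v6 2001:db8::2/128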
+
+<rule>.style RFC6052
+	this rule allows one to implement the mapping used in NAT64
+	environments to represent the entire IPv4 internet as an IPv6 prefix.
+
+	<rule>.v6 <v6pref>/<v6prefixlen>
+		this defines the IPv6 prefix to translate the IPv4 internet into.
+		The allowed prefix lengths are 32, 40, 48, 56, 64, and 96.
+		If a disallowed length is used, the translation fails.
+
+	<rule>.v4 <v4pref>/<v4prefixlen>
+		this parameter is ignored for now in this translation style. 
+		For backwards compatibility it should be 0.0.0.0/0
+
+	<rule>.ea-len
+	<rule>.psid-offset
+	<rule>.fmr-flag
+		ignored in this translation style
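+
+	For example, with <rule>.v6 64:ff9b::/96 (the well-known prefix from
+	RFC 6052), the IPv4 address 192.0.2.33 is embedded in the final 32
+	bits, giving 64:ff9b::192.0.2.33, i.e. 64:ff9b::c000:221.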
+
+<rule>.style MAP
+	this is a translation rule for the MAP (Mapping of Address and Port) algorithm,
+	which may include the layer 4 identifier (tcp/udp port or ICMP id).
+
+	<rule>.v6 <v6pref>/<v6prefixlen>
+		this parameter defines the MAP domain IPv6 prefix
+
+	<rule>.v4 <v4pref>/<v4prefixlen>
+		this parameter defines the MAP domain IPv4 prefix
+
+	<rule>.ea-len
+		this parameter defines the length of the embedded address bits
+		within the IPv6 prefix that has been allocated.
+
+	<rule>.psid-offset
+		this parameter specifies how many bits to the right to shift
+		the bits of the psid within the port value.
+
+	<rule>.fmr-flag
+		this parameter allows the "local" rule to be tried as a "remote" rule
+		as well. In MAP terminology, this allows an FMR rule to be implemented
+		by just setting this flag. The flag is used only on the "local" side,
+		and is ignored for the "remote" side.
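+
+	As a worked example (illustrative values): with <rule>.v4 192.0.2.0/24
+	and <rule>.ea-len 16, the EA bits carry 32 - 24 = 8 IPv4 suffix bits,
+	leaving a PSID of 16 - 8 = 8 bits; combined with <rule>.psid-offset 6
+	this is valid, since psid_len + psid_offset = 14 <= 16.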
+
+
+CODE STRUCTURE
+--------------
+
+There are several groups of files:
+
+nat46-module.*
+	These files deal with the overall Linux module handling: loading / unloading,
+	creating and destroying the /proc control file, as well as parsing the commands
+	to pass on to the netdev portion of the code.
+
+nat46-netdev.*
+	Network device management code. This module accepts the high-level commands and
+	performs the device-level work: locating the devices in the chain, grouping
+	the functions into the device structures, etc. This module adds the pointers
+	to the processing functions which are defined in the core group.
+
+nat46-core.*
+	Core processing routines. These do not do any netdevice/module work; they
+	operate primarily on sk_buff and nat46_instance_t data structures.
+	They use the Linux kernel and glue functions.
+
+nat46-glue.*
+	These are the "adaptation" functions; over time it is expected there will
+	be almost nothing left here. The reason for the "glue" code to exist is to share
+	the core code with userland implementations.
+
+
+ACKNOWLEDGEMENTS
+----------------
+
+This code has been inspired by, or uses some parts of, the following:
+
+* CERNET MAP implementation
+
+  https://github.com/cernet/MAP
+
+* Stateful NAT64 kernel module implementation by Julius Kriukas
+
+  https://github.com/fln/nat64
+
+* Stateless NAT46 kernel module implementation by Tomasz Mrugalski
+
+  https://github.com/tomaszmrugalski/ip46nat
+
+
diff --git a/nat46/nat46/modules/nat46-core.c b/nat46/nat46/modules/nat46-core.c
new file mode 100644
index 0000000..4f152a7
--- /dev/null
+++ b/nat46/nat46/modules/nat46-core.c
@@ -0,0 +1,2489 @@
+/*
+ * core nat46 functionality.
+ * It does not know about network devices, modules or anything similar:
+ * those are abstracted away by other layers.
+ *
+ * Copyright (c) 2013-2014 Andrew Yourtchenko <ayourtch@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <net/route.h>
+#include <linux/version.h>
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,4,0)
+#include <net/ipv6_frag.h>
+#endif
+#include <net/netfilter/ipv6/nf_defrag_ipv6.h>
+
+#include "nat46-glue.h"
+#include "nat46-core.h"
+#include "nat46-module.h"
+
+static uint16_t xlate_pkt_in_err_v4_to_v6(nat46_instance_t *nat46, struct iphdr *iph,
+			struct sk_buff *old_skb, uint16_t *sport, uint16_t *dport);
+static DEFINE_SPINLOCK(port_id_lock);
+
+void
+nat46debug_dump(nat46_instance_t *nat46, int level, void *addr, int len)
+{
+  char tohex[] = "0123456789ABCDEF";
+  int i = 0;
+  int k = 0;
+  unsigned char *pc = addr;
+
+  char buf0[32];                // offset
+  char buf1[64];                // hex
+  char buf2[64];                // literal
+
+  char *pc1 = buf1;
+  char *pc2 = buf2;
+
+  while(--len >= 0) {
+    if(i % 16 == 0) {
+      for(k=0; k<9; k++) {
+        buf0[k] = 0;
+      }
+      for(k=0; k<8; k++) {
+        buf0[7-k] = tohex[ 0xf & (i >> k) ];
+      }
+      buf0[8] = 0;
+      buf1[0] = 0;
+      buf2[0] = 0;
+      pc1 = buf1;
+      pc2 = buf2;
+    }
+    *pc1++ = tohex[*pc >> 4];
+    *pc1++ = tohex[*pc & 15];
+    *pc1++ = ' ';
+
+    if(*pc >= 32 && *pc < 127) {
+      *pc2++ = *pc;
+    } else {
+      *pc2++ = '.';
+    }
+    i++;
+    pc++;
+    if(i % 16 == 0) {
+      *pc1 = 0;
+      *pc2 = 0;
+      nat46debug(level, "%s:   %s  %s", buf0, buf1, buf2);
+    }
+
+  }
+  if(i % 16 != 0) {
+    while(i % 16 != 0) {
+      *pc1++ = ' ';
+      *pc1++ = ' ';
+      *pc1++ = ' ';
+      *pc2++ = ' ';
+      i++;
+    }
+    *pc1 = 0;
+    *pc2 = 0;
+    nat46debug(level, "%s:   %s  %s", buf0, buf1, buf2);
+  }
+}
+
+/* return the current arg, and advance the tail to the next space-separated word */
+char *get_next_arg(char **ptail) {
+  char *pc = NULL;
+  while ((*ptail) && (**ptail) && ((**ptail == ' ') || (**ptail == '\n'))) {
+    **ptail = 0;
+    (*ptail)++;
+  }
+  pc = *ptail;
+
+  while ((*ptail) && (**ptail) && ((**ptail != ' ') && (**ptail != '\n'))) {
+    (*ptail)++;
+  }
+
+  while ((*ptail) && (**ptail) && ((**ptail == ' ') || (**ptail == '\n'))) {
+    **ptail = 0;
+    (*ptail)++;
+  }
+
+  if ((pc) && (0 == *pc)) {
+    pc = NULL;
+  }
+  return pc;
+}
+
+/*
+ * Parse an IPv6 address (if pref_len is NULL), or prefix (if it isn't).
+ * parses destructively (places \0 between address and prefix len)
+ */
+int try_parse_ipv6_prefix(struct in6_addr *pref, int *pref_len, char *arg) {
+  int err = 0;
+  char *arg_plen = strchr(arg, '/');
+  if (arg_plen) {
+    *arg_plen++ = 0;
+    if (pref_len) {
+      *pref_len = simple_strtol(arg_plen, NULL, 10);
+
+      /*
+       * ipv6 prefix should be <= 128
+       */
+      if (*pref_len > IPV6_BITS_MAX) {
+        return -1;
+      }
+    }
+  }
+  err = (1 != in6_pton(arg, -1, (u8 *)pref, '\0', NULL));
+  return err;
+}
+
+int try_parse_ipv4_prefix(u32 *v4addr, int *pref_len, char *arg) {
+  int err = 0;
+  char *arg_plen = strchr(arg, '/');
+  if (arg_plen) {
+    *arg_plen++ = 0;
+    if (pref_len) {
+      *pref_len = simple_strtol(arg_plen, NULL, 10);
+
+      /*
+       * ipv4 prefix len should be <= 32
+       */
+      if (*pref_len > IPV4_BITS_MAX) {
+        return -1;
+      }
+    }
+  }
+  err = (1 != in4_pton(arg, -1, (u8 *)v4addr, '/', NULL));
+  return err;
+}
+
+
+/*
+ * parse a rule argument and put config into a rule.
+ * advance the tail to prepare for the next arg parsing.
+ * destructive.
+ */
+
+int try_parse_rule_arg(nat46_xlate_rule_t *rule, char *arg_name, char **ptail) {
+  int err = 0;
+  char *val = get_next_arg(ptail);
+  if (NULL == val) {
+    err = -1;
+  } else if (0 == strcmp(arg_name, "v6")) {
+    err = try_parse_ipv6_prefix(&rule->v6_pref, &rule->v6_pref_len, val);
+  } else if (0 == strcmp(arg_name, "v4")) {
+    err = try_parse_ipv4_prefix(&rule->v4_pref, &rule->v4_pref_len, val);
+  } else if (0 == strcmp(arg_name, "ea-len")) {
+    rule->ea_len = simple_strtol(val, NULL, 10);
+  } else if (0 == strcmp(arg_name, "psid-offset")) {
+    rule->psid_offset = simple_strtol(val, NULL, 10);
+  } else if (0 == strcmp(arg_name, "style")) {
+    if (0 == strcmp("MAP", val)) {
+      rule->style = NAT46_XLATE_MAP;
+    } else if (0 == strcmp("MAP0", val)) {
+      rule->style = NAT46_XLATE_MAP0;
+    } else if (0 == strcmp("RFC6052", val)) {
+      rule->style = NAT46_XLATE_RFC6052;
+    } else if (0 == strcmp("NONE", val)) {
+      rule->style = NAT46_XLATE_NONE;
+    } else {
+      err = 1;
+    }
+  }
+  return err;
+}
+
+static inline void nat46_swap(nat46_xlate_rulepair_t *var1, nat46_xlate_rulepair_t *var2) {
+	nat46_xlate_rulepair_t temp;
+	temp = *var1;
+	*var1 = *var2;
+	*var2 = temp;
+}
+
+/*
+ * Sort rule pairs based on prefix length.
+ */
+void nat46_sort_rule_array(nat46_instance_t *nat46) {
+	int i, j;
+	int nelem = nat46->npairs;
+	nat46_xlate_rulepair_t *array = NULL;
+
+	memcpy(nat46->sorted_ipv4_local_pairs, nat46->pairs, nelem * sizeof(nat46_xlate_rulepair_t));
+	memcpy(nat46->sorted_ipv4_remote_pairs, nat46->pairs, nelem * sizeof(nat46_xlate_rulepair_t));
+	memcpy(nat46->sorted_ipv6_local_pairs, nat46->pairs, nelem * sizeof(nat46_xlate_rulepair_t));
+	memcpy(nat46->sorted_ipv6_remote_pairs, nat46->pairs, nelem * sizeof(nat46_xlate_rulepair_t));
+
+	array = &nat46->sorted_ipv4_local_pairs[0];
+	for (i = 0; i < nelem - 1; i++) {
+		for (j = 0; j < nelem - i - 1; j++) {
+			if (array[j].local.v4_pref_len < array[j+1].local.v4_pref_len) {
+				nat46_swap (&array[j], &array[j+1]);
+			}
+		}
+	}
+
+	array = &nat46->sorted_ipv4_remote_pairs[0];
+	for (i = 0; i < nelem - 1; i++) {
+		for (j = 0; j < nelem - i - 1; j++) {
+			if (array[j].remote.v4_pref_len < array[j+1].remote.v4_pref_len) {
+				nat46_swap (&array[j], &array[j+1]);
+			}
+		}
+	}
+
+	array = &nat46->sorted_ipv6_local_pairs[0];
+	for (i = 0; i < nelem - 1; i++) {
+		for (j = 0; j < nelem - i - 1; j++) {
+			if (array[j].local.v6_pref_len < array[j+1].local.v6_pref_len) {
+				nat46_swap (&array[j], &array[j+1]);
+			}
+		}
+	}
+
+	array = &nat46->sorted_ipv6_remote_pairs[0];
+	for (i = 0; i < nelem - 1; i++) {
+		for (j = 0; j < nelem - i - 1; j++) {
+			if (array[j].remote.v6_pref_len < array[j+1].remote.v6_pref_len) {
+				nat46_swap (&array[j], &array[j+1]);
+			}
+		}
+	}
+}
+
+bool nat46_validate_RFC6052_style(nat46_instance_t *nat46, nat46_xlate_rule_t rule)
+{
+	if (rule.style == NAT46_XLATE_RFC6052) {
+		if (!((rule.v6_pref_len == 32) || (rule.v6_pref_len == 40) ||
+					(rule.v6_pref_len == 48) || (rule.v6_pref_len == 56) ||
+					(rule.v6_pref_len == 64) || (rule.v6_pref_len == 96))) {
+			nat46debug(3, "IPv6 prefix len is invalid");
+			return false;
+		}
+	}
+	return true;
+}
+
+bool nat46_validate_MAP_style(nat46_instance_t *nat46, nat46_xlate_rule_t rule)
+{
+	int psid_len;
+	if (rule.style == NAT46_XLATE_MAP) {
+
+		/*
+		 * max ea_len is 48
+		 */
+		if (rule.ea_len > EA_LEN_MAX) {
+			nat46debug(3, "EA-length should not exceed 48");
+			return false;
+		}
+
+		if (rule.v4_pref_len + rule.ea_len > IPV4_BITS_MAX) {
+			psid_len = rule.ea_len - (IPV4_BITS_MAX - rule.v4_pref_len);
+		} else {
+			psid_len = 0;
+		}
+
+		if (psid_len + rule.psid_offset > PSID_LEN_MAX) {
+			nat46debug(3, "psid_len + psid_offset should not exceed 16");
+			return false;
+		}
+	}
+	return true;
+}
+
+int nat46_validate_ipair_config(nat46_instance_t *nat46, nat46_xlate_rulepair_t *apair)
+{
+	if (!nat46_validate_RFC6052_style(nat46, apair->local)) {
+		return -1;
+	}
+
+	if (!nat46_validate_RFC6052_style(nat46, apair->remote)) {
+		return -1;
+	}
+
+	if (!nat46_validate_MAP_style(nat46, apair->local)) {
+		return -1;
+	}
+
+	if (!nat46_validate_MAP_style(nat46, apair->remote)) {
+		return -1;
+	}
+	return 0;
+}
+
+/*
+ * Parse the config commands in the buffer,
+ * destructive (puts zero between the args)
+ */
+int nat46_set_ipair_config(nat46_instance_t *nat46, int ipair, char *buf, int count) {
+  char *tail = buf;
+  char *arg_name;
+  int err = 0;
+  char *val;
+  nat46_xlate_rulepair_t *apair = NULL;
+
+  if ((ipair < 0) || (ipair >= nat46->npairs)) {
+    return -1;
+  }
+
+  apair = &nat46->pairs[ipair];
+
+  while ((0 == err) && (NULL != (arg_name = get_next_arg(&tail)))) {
+    if (0 == strcmp(arg_name, "debug")) {
+      val = get_next_arg(&tail);
+      if (val) {
+        nat46->debug = simple_strtol(val, NULL, 10);
+      }
+    } else if (arg_name == strstr(arg_name, "local.")) {
+      arg_name += strlen("local.");
+      nat46debug(13, "Setting local xlate parameter");
+      err = try_parse_rule_arg(&apair->local, arg_name, &tail);
+    } else if (arg_name == strstr(arg_name, "remote.")) {
+      arg_name += strlen("remote.");
+      nat46debug(13, "Setting remote xlate parameter");
+      err = try_parse_rule_arg(&apair->remote, arg_name, &tail);
+    }
+  }
+
+  err = nat46_validate_ipair_config(nat46, apair);
+  if (err) {
+    return err;
+  }
+
+  /*
+   * sort nat46->pairs based on prefix length.
+   */
+  nat46_sort_rule_array(nat46);
+
+  return 0;
+}
+
+int nat46_set_config(nat46_instance_t *nat46, char *buf, int count) {
+  int ret = -1;
+  if (nat46->npairs > 0) {
+    ret = nat46_set_ipair_config(nat46, nat46->npairs-1, buf, count);
+  }
+  return ret;
+}
+
+char *xlate_style_to_string(nat46_xlate_style_t style) {
+  switch(style) {
+    case NAT46_XLATE_NONE:
+      return "NONE";
+    case NAT46_XLATE_MAP:
+      return "MAP";
+    case NAT46_XLATE_MAP0:
+      return "MAP0";
+    case NAT46_XLATE_RFC6052:
+      return "RFC6052";
+  }
+  return "unknown";
+}
+
+/*
+ * Get the nat46 configuration into a supplied buffer (if non-null).
+ */
+int nat46_get_ipair_config(nat46_instance_t *nat46, int ipair, char *buf, int count) {
+  int ret = 0;
+  nat46_xlate_rulepair_t *apair = NULL;
+  char *format = "local.v4 %pI4/%d local.v6 %pI6c/%d local.style %s local.ea-len %d local.psid-offset %d remote.v4 %pI4/%d remote.v6 %pI6c/%d remote.style %s remote.ea-len %d remote.psid-offset %d debug %d";
+
+  if ((ipair < 0) || (ipair >= nat46->npairs)) {
+    return ret;
+  }
+  apair = &nat46->pairs[ipair];
+
+  ret = snprintf(buf, count, format,
+		&apair->local.v4_pref, apair->local.v4_pref_len,
+		&apair->local.v6_pref, apair->local.v6_pref_len,
+		xlate_style_to_string(apair->local.style),
+		apair->local.ea_len, apair->local.psid_offset,
+
+		&apair->remote.v4_pref, apair->remote.v4_pref_len,
+		&apair->remote.v6_pref, apair->remote.v6_pref_len,
+		xlate_style_to_string(apair->remote.style),
+		apair->remote.ea_len, apair->remote.psid_offset,
+
+		nat46->debug);
+  return ret;
+}
+
+int nat46_get_config(nat46_instance_t *nat46, char *buf, int count) {
+  int ret = 0;
+  if (nat46->npairs > 0) {
+    ret = nat46_get_ipair_config(nat46, nat46->npairs-1, buf, count);
+  } else {
+    nat46debug(0, "nat46_get_config: npairs is 0");
+  }
+  return ret;
+}
+
+
+/********************************************************************
+
+From RFC6052, section 2.2:
+
+    +--+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
+    |PL| 0-------------32--40--48--56--64--72--80--88--96--104---------|
+    +--+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
+    |32|     prefix    |v4(32)         | u | suffix                    |
+    +--+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
+    |40|     prefix        |v4(24)     | u |(8)| suffix                |
+    +--+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
+    |48|     prefix            |v4(16) | u | (16)  | suffix            |
+    +--+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
+    |56|     prefix                |(8)| u |  v4(24)   | suffix        |
+    +--+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
+    |64|     prefix                    | u |   v4(32)      | suffix    |
+    +--+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
+    |96|     prefix                                    |    v4(32)     |
+    +--+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+---+
+
+********************************************************************/
+
+void xlate_v4_to_nat64(nat46_instance_t *nat46, nat46_xlate_rule_t *rule, void *pipv4, void *pipv6) {
+  char *ipv4 = pipv4;
+  char *ipv6 = pipv6;
+
+  /* 'u' byte and suffix are zero */
+  memset(&ipv6[8], 0, 8);
+  switch(rule->v6_pref_len) {
+    case 32:
+      memcpy(ipv6, &rule->v6_pref, 4);
+      memcpy(&ipv6[4], ipv4, 4);
+      break;
+    case 40:
+      memcpy(ipv6, &rule->v6_pref, 5);
+      memcpy(&ipv6[5], ipv4, 3);
+      ipv6[9] = ipv4[3];
+      break;
+    case 48:
+      memcpy(ipv6, &rule->v6_pref, 6);
+      ipv6[6] = ipv4[0];
+      ipv6[7] = ipv4[1];
+      ipv6[9] = ipv4[2];
+      ipv6[10] = ipv4[3];
+      break;
+    case 56:
+      memcpy(ipv6, &rule->v6_pref, 7);
+      ipv6[7] = ipv4[0];
+      ipv6[9] = ipv4[1];
+      ipv6[10] = ipv4[2];
+      ipv6[11] = ipv4[3];
+      break;
+    case 64:
+      memcpy(ipv6, &rule->v6_pref, 8);
+      memcpy(&ipv6[9], ipv4, 4);
+      break;
+    case 96:
+      memcpy(ipv6, &rule->v6_pref, 12);
+      memcpy(&ipv6[12], ipv4, 4);
+      break;
+  }
+}
+
+int xlate_nat64_to_v4(nat46_instance_t *nat46, nat46_xlate_rule_t *rule, void *pipv6, void *pipv4) {
+  char *ipv4 = pipv4;
+  char *ipv6 = pipv6;
+  int cmp = -1;
+  int v6_pref_len = rule->v6_pref_len;
+
+  switch(v6_pref_len) {
+    case 32:
+      cmp = memcmp(ipv6, &rule->v6_pref, 4);
+      break;
+    case 40:
+      cmp = memcmp(ipv6, &rule->v6_pref, 5);
+      break;
+    case 48:
+      cmp = memcmp(ipv6, &rule->v6_pref, 6);
+      break;
+    case 56:
+      cmp = memcmp(ipv6, &rule->v6_pref, 7);
+      break;
+    case 64:
+      cmp = memcmp(ipv6, &rule->v6_pref, 8);
+      break;
+    case 96:
+      cmp = memcmp(ipv6, &rule->v6_pref, 12);
+      break;
+  }
+  if (cmp) {
+    /* Not in NAT64 prefix */
+    return 0;
+  }
+  switch(v6_pref_len) {
+    case 32:
+      memcpy(ipv4, &ipv6[4], 4);
+      break;
+    case 40:
+      memcpy(ipv4, &ipv6[5], 3);
+      ipv4[3] = ipv6[9];
+      break;
+    case 48:
+      ipv4[0] = ipv6[6];
+      ipv4[1] = ipv6[7];
+      ipv4[2] = ipv6[9];
+      ipv4[3] = ipv6[10];
+      break;
+    case 56:
+      ipv4[0] = ipv6[7];
+      ipv4[1] = ipv6[9];
+      ipv4[2] = ipv6[10];
+      ipv4[3] = ipv6[11];
+      break;
+    case 64:
+      memcpy(ipv4, &ipv6[9], 4);
+      break;
+    case 96:
+      memcpy(ipv4, &ipv6[12], 4);
+      break;
+  }
+  return 1;
+}
+
+/*
+
+The below bitarray copy code is from
+
+http://stackoverflow.com/questions/3534535/whats-a-time-efficient-algorithm-to-copy-unaligned-bit-arrays
+
+*/
+
+#ifndef CHAR_BIT
+#define CHAR_BIT 8
+#endif
+#define PREPARE_FIRST_COPY()                                      \
+    do {                                                          \
+    if (src_len >= (CHAR_BIT - dst_offset_modulo)) {              \
+        *dst     &= reverse_mask[dst_offset_modulo];              \
+        src_len -= CHAR_BIT - dst_offset_modulo;                  \
+    } else {                                                      \
+        *dst     &= reverse_mask[dst_offset_modulo]               \
+              | reverse_mask_xor[dst_offset_modulo + src_len + 1];\
+         c       &= reverse_mask[dst_offset_modulo + src_len    ];\
+        src_len = 0;                                              \
+    } } while (0)
+
+
+static void
+bitarray_copy(const void *src_org, int src_offset, int src_len,
+                    void *dst_org, int dst_offset)
+{
+/*
+    static const unsigned char mask[] =
+        { 0x55, 0x01, 0x03, 0x07, 0x0f, 0x1f, 0x3f, 0x7f, 0xff };
+*/
+    static const unsigned char reverse_mask[] =
+        { 0x55, 0x80, 0xc0, 0xe0, 0xf0, 0xf8, 0xfc, 0xfe, 0xff };
+    static const unsigned char reverse_mask_xor[] =
+        { 0xff, 0x7f, 0x3f, 0x1f, 0x0f, 0x07, 0x03, 0x01, 0x00 };
+
+    if (src_len) {
+        const unsigned char *src;
+              unsigned char *dst;
+        int                  src_offset_modulo,
+                             dst_offset_modulo;
+
+        src = src_org + (src_offset / CHAR_BIT);
+        dst = dst_org + (dst_offset / CHAR_BIT);
+
+        src_offset_modulo = src_offset % CHAR_BIT;
+        dst_offset_modulo = dst_offset % CHAR_BIT;
+
+        if (src_offset_modulo == dst_offset_modulo) {
+            int              byte_len;
+            int              src_len_modulo;
+            if (src_offset_modulo) {
+                unsigned char   c;
+
+                c = reverse_mask_xor[dst_offset_modulo]     & *src++;
+
+                PREPARE_FIRST_COPY();
+                *dst++ |= c;
+            }
+
+            byte_len = src_len / CHAR_BIT;
+            src_len_modulo = src_len % CHAR_BIT;
+
+            if (byte_len) {
+                memcpy(dst, src, byte_len);
+                src += byte_len;
+                dst += byte_len;
+            }
+            if (src_len_modulo) {
+                *dst     &= reverse_mask_xor[src_len_modulo];
+                *dst |= reverse_mask[src_len_modulo]     & *src;
+            }
+        } else {
+            int             bit_diff_ls,
+                            bit_diff_rs;
+            int             byte_len;
+            int             src_len_modulo;
+            unsigned char   c;
+            /*
+             * Begin: Line things up on destination.
+             */
+            if (src_offset_modulo > dst_offset_modulo) {
+                bit_diff_ls = src_offset_modulo - dst_offset_modulo;
+                bit_diff_rs = CHAR_BIT - bit_diff_ls;
+
+                c = *src++ << bit_diff_ls;
+                c |= *src >> bit_diff_rs;
+                c     &= reverse_mask_xor[dst_offset_modulo];
+            } else {
+                bit_diff_rs = dst_offset_modulo - src_offset_modulo;
+                bit_diff_ls = CHAR_BIT - bit_diff_rs;
+
+                c = *src >> bit_diff_rs     &
+                    reverse_mask_xor[dst_offset_modulo];
+            }
+            PREPARE_FIRST_COPY();
+            *dst++ |= c;
+
+            /*
+             * Middle: copy with only shifting the source.
+             */
+            byte_len = src_len / CHAR_BIT;
+
+            while (--byte_len >= 0) {
+                c = *src++ << bit_diff_ls;
+                c |= *src >> bit_diff_rs;
+                *dst++ = c;
+            }
+
+            /*
+             * End: copy the remaining bits;
+             */
+            src_len_modulo = src_len % CHAR_BIT;
+            if (src_len_modulo) {
+                c = *src++ << bit_diff_ls;
+                c |= *src >> bit_diff_rs;
+                c     &= reverse_mask[src_len_modulo];
+
+                *dst     &= reverse_mask_xor[src_len_modulo];
+                *dst |= c;
+            }
+        }
+    }
+}
+
+int xlate_map_v4_to_v6(nat46_instance_t *nat46, nat46_xlate_rule_t *rule, void *pipv4, void *pipv6, uint16_t *pl4id, int map_version) {
+  int ret = 0;
+  u32 *pv4u32 = pipv4;
+  uint8_t *p6 = pipv6;
+
+  uint16_t psid;
+  uint16_t l4id = pl4id ? *pl4id : 0;
+  uint8_t psid_bits_len = rule->ea_len - (32 - rule->v4_pref_len);
+  uint8_t v4_lsb_bits_len = 32 - rule->v4_pref_len;
+
+  /* check that the ipv4 address is within the IPv4 map domain and reject if not */
+
+  if ( (ntohl(*pv4u32) & (0xffffffff << v4_lsb_bits_len)) != ntohl(rule->v4_pref) ) {
+    nat46debug(5, "xlate_map_v4_to_v6: IPv4 address %pI4 outside of MAP domain %pI4/%d", pipv4, &rule->v4_pref, rule->v4_pref_len);
+    return 0;
+  }
+
+  if (rule->ea_len < (32 - rule->v4_pref_len) ) {
+    nat46debug(0, "xlate_map_v4_to_v6: rule->ea_len < (32 - rule->v4_pref_len)");
+    return 0;
+  }
+
+  if (!pl4id && psid_bits_len) {
+    nat46debug(5, "xlate_map_v4_to_v6: l4id required for MAP domain %pI4/%d (ea-len %d)", &rule->v4_pref, rule->v4_pref_len, rule->ea_len);
+    return 0;
+  }
+
+  /* zero out the IPv6 address */
+  memset(pipv6, 0, 16);
+
+  psid = (ntohs(l4id) >> (16 - psid_bits_len - rule->psid_offset)) & (0xffff >> (16 - psid_bits_len));
+  nat46debug(10, "xlate_map_v4_to_v6: ntohs(l4id): %04x psid_bits_len: %d, rule psid-offset: %d, psid: %d\n", ntohs(l4id), psid_bits_len, rule->psid_offset, psid);
+
+  /*
+   *     create the IID. Note that there can be two formats:
+   *
+   *     draft-ietf-softwire-map-t-00:
+   *
+   *
+   *   +--+---+---+---+---+---+---+---+---+
+   *   |PL|   8  16  24  32  40  48  56   |
+   *   +--+---+---+---+---+---+---+---+---+
+   *   |64| u | IPv4 address  |  PSID | 0 |
+   *   +--+---+---+---+---+---+---+---+---+
+   *
+   *
+   *     latest draft-ietf-softwire-map-t:
+   *
+   *   |        128-n-o-s bits            |
+   *   | 16 bits|    32 bits     | 16 bits|
+   *   +--------+----------------+--------+
+   *   |   0    |  IPv4 address  |  PSID  |
+   *   +--------+----------------+--------+
+   *
+   *   In the case of an IPv4 prefix, the IPv4 address field is right-padded
+   *   with zeros up to 32 bits.  The PSID is zero left-padded to create a
+   *   16 bit field.  For an IPv4 prefix or a complete IPv4 address, the
+   *   PSID field is zero.
+   *
+   *   If the End-user IPv6 prefix length is larger than 64, the most
+   *   significant parts of the interface identifier is overwritten by the
+   *   prefix.
+   *
+   */
+  if (map_version) {
+    p6[8] = p6[9] = 0;
+    p6[10] = 0xff & (ntohl(*pv4u32) >> 24);
+    p6[11] = 0xff & (ntohl(*pv4u32) >> 16);
+    p6[12] = 0xff & (ntohl(*pv4u32) >> 8);
+    p6[13] = 0xff & (ntohl(*pv4u32));
+    p6[14] = 0xff & (psid >> 8);
+    p6[15] = 0xff & (psid);
+  } else {
+    p6[8]  = 0;
+    p6[9]  = 0xff & (ntohl(*pv4u32) >> 24);
+    p6[10] = 0xff & (ntohl(*pv4u32) >> 16);
+    p6[11] = 0xff & (ntohl(*pv4u32) >> 8);
+    p6[12] = 0xff & (ntohl(*pv4u32));
+    p6[13] = 0xff & (psid >> 8);
+    p6[14] = 0xff & (psid);
+    p6[15] = 0;
+    /* old EID */
+  }
+
+  /* copy the necessary part of domain IPv6 prefix into place, w/o overwriting the existing data */
+  bitarray_copy(&rule->v6_pref, 0, rule->v6_pref_len, p6, 0);
+
+  if (v4_lsb_bits_len) {
+    /* insert the lower 32-v4_pref_len bits of IPv4 address at rule->v6_pref_len */
+    bitarray_copy(pipv4, rule->v4_pref_len, v4_lsb_bits_len, p6, rule->v6_pref_len);
+  }
+
+  if (psid_bits_len) {
+    /* insert the psid bits at rule->v6_pref_len + v4_lsb_bits */
+    bitarray_copy(&l4id, rule->psid_offset, psid_bits_len, p6, rule->v6_pref_len + v4_lsb_bits_len);
+  }
+
+  ret = 1;
+
+  return ret;
+}
+
+int xlate_map_v6_to_v4(nat46_instance_t *nat46, nat46_xlate_rule_t *rule, void *pipv6, void *pipv4, int version) {
+  uint8_t v4_lsb_bits_len = 32 - rule->v4_pref_len;
+
+  if (memcmp(pipv6, &rule->v6_pref, rule->v6_pref_len/8)) {
+    /* address not within the MAP IPv6 prefix */
+    nat46debug(5, "xlate_map_v6_to_v4: IPv6 address %pI6 outside of MAP domain %pI6/%d", pipv6, &rule->v6_pref, rule->v6_pref_len);
+    return 0;
+  }
+  if (rule->v6_pref_len % 8) {
+    uint8_t mask = 0xff << (8 - (rule->v6_pref_len % 8));
+    uint8_t *pa1 = (uint8_t *)pipv6 + (rule->v6_pref_len/8);
+    uint8_t *pa2 = (uint8_t *)&rule->v6_pref + (rule->v6_pref_len/8);
+
+    if ( (*pa1 & mask) != (*pa2 & mask) ) {
+      nat46debug(5, "xlate_map_v6_to_v4: IPv6 address %pI6 outside of MAP domain %pI6/%d (LSB)", pipv6, &rule->v6_pref, rule->v6_pref_len);
+      return 0;
+    }
+  }
+
+  if (rule->ea_len < (32 - rule->v4_pref_len) ) {
+    nat46debug(0, "xlate_map_v6_to_v4: rule->ea_len < (32 - rule->v4_pref_len)");
+    return 0;
+  }
+
+  memcpy(pipv4, &rule->v4_pref, 4);
+  if (v4_lsb_bits_len) {
+    bitarray_copy(pipv6, rule->v6_pref_len, v4_lsb_bits_len, pipv4, rule->v4_pref_len);
+  }
+  /*
+   * I do not verify the PSID here. The idea is that if the destination port is incorrect, this
+   * will be caught in the NAT44 module.
+   */
+  return 1;
+}
+
+int xlate_v4_to_v6(nat46_instance_t *nat46, nat46_xlate_rule_t *rule, void *pipv4, void *pipv6, uint16_t *pl4id) {
+  int ret = 0;
+  switch(rule->style) {
+    case NAT46_XLATE_NONE: /* always fail unless it is a host 1:1 translation */
+      if ( (rule->v6_pref_len == 128) && (rule->v4_pref_len == 32) &&
+           (0 == memcmp(pipv4, &rule->v4_pref, sizeof(rule->v4_pref))) ) {
+         memcpy(pipv6, &rule->v6_pref, sizeof(rule->v6_pref));
+         ret = 1;
+      }
+      break;
+    case NAT46_XLATE_MAP0:
+      ret = xlate_map_v4_to_v6(nat46, rule, pipv4, pipv6, pl4id, 0);
+      break;
+    case NAT46_XLATE_MAP:
+      ret = xlate_map_v4_to_v6(nat46, rule, pipv4, pipv6, pl4id, 1);
+      break;
+    case NAT46_XLATE_RFC6052:
+      xlate_v4_to_nat64(nat46, rule, pipv4, pipv6);
+      /* NAT46 rules using RFC6052 always succeed since they can map any IPv4 address */
+      ret = 1;
+      break;
+  }
+  return ret;
+}
+
+int xlate_v6_to_v4(nat46_instance_t *nat46, nat46_xlate_rule_t *rule, void *pipv6, void *pipv4) {
+  int ret = 0;
+  switch(rule->style) {
+    case NAT46_XLATE_NONE: /* always fail unless it is a host 1:1 translation */
+      if ( (rule->v6_pref_len == 128) && (rule->v4_pref_len == 32) &&
+           (0 == memcmp(pipv6, &rule->v6_pref, sizeof(rule->v6_pref))) ) {
+         memcpy(pipv4, &rule->v4_pref, sizeof(rule->v4_pref));
+         ret = 1;
+      }
+      break;
+    case NAT46_XLATE_MAP0:
+      ret = xlate_map_v6_to_v4(nat46, rule, pipv6, pipv4, 0);
+      break;
+    case NAT46_XLATE_MAP:
+      ret = xlate_map_v6_to_v4(nat46, rule, pipv6, pipv4, 1);
+      break;
+    case NAT46_XLATE_RFC6052:
+      ret = xlate_nat64_to_v4(nat46, rule, pipv6, pipv4);
+      break;
+  }
+  return ret;
+}
+
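+/*
+ * csum16_upd()
+ *	Incrementally update a 16-bit one's-complement checksum when a 16-bit
+ *	field changes from 'old' to 'new', per RFC 1624: HC' = ~(~HC + ~m + m').
+ */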
+__sum16 csum16_upd(__sum16 csum, u16 old, u16 new) {
+  u32 s;
+  csum = ntohs(~csum);
+  s = (u32)csum + ntohs(~old) + ntohs(new);
+  s = ((s >> 16) & 0xffff) + (s & 0xffff);
+  s += ((s >> 16) & 0xffff);
+  return htons((u16)(~s));
+}
+
+/* Add the TCP/UDP pseudoheader, based on the existing checksum */
+
+__sum16 csum_tcpudp_remagic(__be32 saddr, __be32 daddr, unsigned short len,
+                  unsigned char proto, u16 csum) {
+  u16 *pdata;
+  u16 len0, len1;
+
+  pdata = (u16 *)&saddr;
+  csum = csum16_upd(csum, 0, *pdata++);
+  csum = csum16_upd(csum, 0, *pdata++);
+  pdata = (u16 *)&daddr;
+  csum = csum16_upd(csum, 0, *pdata++);
+  csum = csum16_upd(csum, 0, *pdata++);
+
+  csum = csum16_upd(csum, 0, htons(proto));
+  len1 = htons( (len >> 16) & 0xffff );
+  len0 = htons(len & 0xffff);
+  csum = csum16_upd(csum, 0, len1);
+  csum = csum16_upd(csum, 0, len0);
+  return csum;
+}
+
+/* Undo the IPv6 pseudoheader inclusion into the checksum */
+__sum16 csum_ipv6_unmagic(nat46_instance_t *nat46, const struct in6_addr *saddr,
+                        const struct in6_addr *daddr,
+                        __u32 len, unsigned short proto,
+                        __sum16 csum) {
+  u16 *pdata;
+  int i;
+  u16 len0, len1;
+
+  pdata = (u16 *)saddr;
+  for(i=0;i<8;i++) {
+    csum = csum16_upd(csum, *pdata, 0);
+    pdata++;
+  }
+  pdata = (u16 *)daddr;
+  for(i=0;i<8;i++) {
+    csum = csum16_upd(csum, *pdata, 0);
+    pdata++;
+  }
+  csum = csum16_upd(csum, htons(proto), 0);
+  len1 = htons( (len >> 16) & 0xffff );
+  len0 = htons(len & 0xffff);
+  csum = csum16_upd(csum, len1, 0);
+  csum = csum16_upd(csum, len0, 0);
+  return csum;
+}
+
+/* Update ICMPv6 type/code with incremental checksum adjustment */
+void update_icmp6_type_code(nat46_instance_t *nat46, struct icmp6hdr *icmp6h, u8 type, u8 code) {
+  u16 old_tc = *((u16 *)icmp6h);
+  u16 new_tc;
+  u16 old_csum = icmp6h->icmp6_cksum;
+  u16 new_csum;
+  icmp6h->icmp6_type = type;
+  icmp6h->icmp6_code = code;
+  new_tc = *((u16 *)icmp6h);
+  /* https://tools.ietf.org/html/rfc1624 */
+  new_csum = csum16_upd(old_csum, old_tc, new_tc);
+  nat46debug(1, "Updating the ICMPv6 type to ICMP type %d and code to %d. Old T/C: %04X, New T/C: %04X, Old CS: %04X, New CS: %04X", type, code, old_tc, new_tc, old_csum, new_csum);
+  icmp6h->icmp6_cksum = new_csum;
+}
+
+u16 get_next_ip_id(void) {
+  static u16 id = 0;
+  return id++;
+}
+
+u16 fold_ipv6_frag_id(u32 v6id) {
+  return ((0xffff & (v6id >> 16)) ^ (v6id & 0xffff));
+}
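+
+/* e.g. fold_ipv6_frag_id(0x12345678) == 0x1234 ^ 0x5678 == 0x444c */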
+
+void *add_offset(void *ptr, u16 offset) {
+  return (((char *)ptr)+offset);
+}
+
+/* FIXME: traverse the headers properly */
+void *get_next_header_ptr6(void *pv6, int v6_len) {
+  struct ipv6hdr *ip6h = pv6;
+  void *ret = (ip6h+1);
+
+  if (ip6h->nexthdr == NEXTHDR_FRAGMENT) {
+    struct frag_hdr *fh = (struct frag_hdr*)(ip6h + 1);
+    if(fh->frag_off == 0) {
+      /* Atomic fragment */
+      ret = add_offset(ret, 8);
+    }
+  }
+  return ret;
+}
+
+void fill_v6hdr_from_v4hdr(struct iphdr *iph, struct ipv6hdr *ip6h) {
+  *((__be16 *)ip6h) = htons((6 << 12) | (iph->tos << 4));	/* Version, Traffic Class */
+  memset(&(ip6h->flow_lbl), 0, sizeof(ip6h->flow_lbl));		/* Flowlabel */
+  ip6h->payload_len = htons(ntohs(iph->tot_len) - IPV4HDRSIZE);
+  ip6h->nexthdr = iph->protocol;
+  ip6h->hop_limit = iph->ttl;
+}
+
+void fill_v4hdr_from_v6hdr(struct iphdr * iph, struct ipv6hdr *ip6h, __u32 v4saddr, __u32 v4daddr, __u16 id, __u16 frag_off, __u16 proto, int l3_payload_len) {
+  uint32_t ver_class_flow = ntohl(*(__be32 *)ip6h);
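+  /*
+   * The first 32 bits of the IPv6 header are version(4) | traffic
+   * class(8) | flow label(20); after ntohl(), (ver_class_flow >> 20) & 0xff
+   * recovers the traffic class for the IPv4 TOS byte below.
+   */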
+  iph->ttl = ip6h->hop_limit;
+  iph->saddr = v4saddr;
+  iph->daddr = v4daddr;
+  iph->protocol = proto;
+  *((__be16 *)iph) = htons((4 << 12) | (5 << 8) | ((ver_class_flow >> 20) & 0xff));
+  iph->frag_off = frag_off;
+  iph->id = id;
+  iph->tot_len = htons( l3_payload_len + IPV4HDRSIZE );
+  iph->check = 0;
+  iph->check = ip_fast_csum((unsigned char *)iph, iph->ihl);
+}
+
+u16 unchecksum16(void *p, int count, u16 csum) {
+  u16 *pu16 = p;
+  int i = count;
+  while(i--) {
+    csum = csum16_upd(csum, *pu16++, 0);
+  }
+  return csum;
+}
+
+u16 rechecksum16(void *p, int count, u16 csum) {
+  u16 *pu16 = p;
+  int i = count;
+  while(i--) {
+    csum = csum16_upd(csum, 0, *pu16++);
+  }
+  return csum;
+}
+
+/* Last rule in group must not have "none" as either source or destination */
+int is_last_pair_in_group(nat46_xlate_rulepair_t *apair) {
+  return ( (apair->local.style != NAT46_XLATE_NONE) && (apair->remote.style != NAT46_XLATE_NONE) );
+}
+
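+/*
+ * Longest-prefix match over the pre-sorted rule arrays. This assumes the
+ * sorted_* arrays are ordered by descending prefix length (presumably
+ * arranged at configuration time), so the first matching pair is the
+ * longest match.
+ */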
+nat46_xlate_rulepair_t *nat46_lpm(nat46_instance_t *nat46, nat46_rule_type_t type, void *paddr) {
+	int ipair = 0;
+	nat46_xlate_rulepair_t *apair = NULL;
+	uint32_t mask = 0;
+	uint8_t *pa1;
+	uint8_t *pa2;
+
+	if(!nat46 || !paddr) {
+		return NULL;
+	}
+
+	switch (type) {
+	case NAT46_IPV4_LOCAL:
+		for (ipair = 0; ipair < nat46->npairs; ipair++) {
+			apair = &nat46->sorted_ipv4_local_pairs[ipair];
+
+			/*
+			 * For a 32-bit number, if the shift count is 32, then the
+			 * result of the left shift operation is always 0.
+			 */
+			if (apair->local.v4_pref_len) {
+				mask = htonl(U32_MASK << (IPV4_BITS_MAX - apair->local.v4_pref_len));
+			}
+
+			if((*(uint32_t *)paddr & mask) == (apair->local.v4_pref & mask)) {
+				return apair;
+			}
+		}
+		break;
+	case NAT46_IPV4_REMOTE:
+		for (ipair = 0; ipair < nat46->npairs; ipair++) {
+			apair = &nat46->sorted_ipv4_remote_pairs[ipair];
+
+			/*
+			 * For a 32-bit number, if the shift count is 32, then the
+			 * result of the left shift operation is always 0.
+			 */
+			if (apair->remote.v4_pref_len) {
+				mask = htonl(U32_MASK << (IPV4_BITS_MAX - apair->remote.v4_pref_len));
+			}
+
+			if((*(uint32_t *)paddr & mask) == (apair->remote.v4_pref & mask)) {
+				return apair;
+			}
+		}
+		break;
+	case NAT46_IPV6_LOCAL:
+		for (ipair = 0; ipair < nat46->npairs; ipair++) {
+			apair = &nat46->sorted_ipv6_local_pairs[ipair];
+			if(memcmp(paddr, &apair->local.v6_pref, apair->local.v6_pref_len / BITS_PER_BYTE)) {
+				continue;
+			}
+			if(apair->local.v6_pref_len % BITS_PER_BYTE) {
+				mask = U8_MASK << (BITS_PER_BYTE - (apair->local.v6_pref_len % BITS_PER_BYTE));
+				pa1 = (uint8_t *)paddr + (apair->local.v6_pref_len / BITS_PER_BYTE);
+				pa2 = (uint8_t *)&apair->local.v6_pref + (apair->local.v6_pref_len / BITS_PER_BYTE);
+
+				if ((*pa1 & mask) == (*pa2 & mask)) {
+					return apair;
+				}
+			}
+			else
+				return apair;
+		}
+		break;
+	case NAT46_IPV6_REMOTE:
+		for (ipair = 0; ipair < nat46->npairs; ipair++) {
+			apair = &nat46->sorted_ipv6_remote_pairs[ipair];
+			if(memcmp(paddr, &apair->remote.v6_pref, apair->remote.v6_pref_len / BITS_PER_BYTE)) {
+				continue;
+			}
+			if(apair->remote.v6_pref_len % BITS_PER_BYTE) {
+				mask = U8_MASK << (BITS_PER_BYTE - (apair->remote.v6_pref_len % BITS_PER_BYTE));
+				pa1 = (uint8_t *)paddr + (apair->remote.v6_pref_len / BITS_PER_BYTE);
+				pa2 = (uint8_t *)&apair->remote.v6_pref + (apair->remote.v6_pref_len / BITS_PER_BYTE);
+
+				if((*pa1 & mask) == (*pa2 & mask)) {
+					return apair;
+				}
+			}
+			else
+				return apair;
+		}
+		break;
+	default:
+		nat46debug(0, "%s : Invalid prefix type.\n", __func__);
+	}
+	return NULL;
+}
+
+void pairs_xlate_v6_to_v4_inner(nat46_instance_t *nat46, struct ipv6hdr *ip6h, __u32 *pv4saddr, __u32 *pv4daddr) {
+  int ipair = 0;
+  nat46_xlate_rulepair_t *apair = NULL;
+  int xlate_src = -1;
+  int xlate_dst = -1;
+
+  apair = nat46_lpm(nat46, NAT46_IPV6_REMOTE, &ip6h->daddr);
+  if (!apair) {
+    return;
+  }
+
+  if (xlate_v6_to_v4(nat46, &apair->remote, &ip6h->daddr, pv4daddr)) {
+    xlate_dst = ipair;
+  }
+  if (xlate_v6_to_v4(nat46, &apair->local, &ip6h->saddr, pv4saddr)) {
+    xlate_src = ipair;
+  }
+
+  if ((xlate_src >= 0) && (xlate_dst >= 0)) {
+    /* we did manage to translate it */
+    nat46debug(5, "[nat46payload] xlate results: src %d dst %d", xlate_src, xlate_dst);
+  } else {
+    nat46debug(1, "[nat46] Could not find a translation pair v6->v4 src %pI6c dst %pI6c", &ip6h->saddr, &ip6h->daddr);
+  }
+}
+
+/*
+ * pv6 is pointing to the ipv6 header inside the payload.
+ * Translate this header and attempt to extract the sport/dport
+ * so the callers can use them for translation as well.
+ */
+int xlate_payload6_to4(nat46_instance_t *nat46, void *pv6, void *ptrans_hdr, int v6_len, u16 *ul_sum, int *ptailTruncSize) {
+  struct ipv6hdr *ip6h = pv6;
+  __u32 v4saddr, v4daddr;
+  struct iphdr new_ipv4;
+  struct iphdr *iph = &new_ipv4;
+  u16 proto = ip6h->nexthdr;
+  u16 ipid = 0;
+  u16 ipflags = htons(IP_DF);
+  int infrag_payload_len = ntohs(ip6h->payload_len);
+
+  /*
+   * The packet is supposedly our own packet after translation - so the rules
+   * will be swapped compared to translation of the outer packet
+   */
+  pairs_xlate_v6_to_v4_inner(nat46, pv6, &v4saddr, &v4daddr);
+
+  if (proto == NEXTHDR_FRAGMENT) {
+    struct frag_hdr *fh = (struct frag_hdr*)(ip6h + 1);
+    if(fh->frag_off == 0) {
+      /* Atomic fragment */
+      proto = fh->nexthdr;
+      ipid = fold_ipv6_frag_id(fh->identification);
+      v6_len -= 8;
+      infrag_payload_len -= 8;
+      *ptailTruncSize += 8;
+      ipflags = 0;
+    }
+  }
+
+  switch(proto) {
+    case NEXTHDR_TCP: {
+      struct tcphdr *th = ptrans_hdr;
+      u16 sum1 = csum_ipv6_unmagic(nat46, &ip6h->saddr, &ip6h->daddr, infrag_payload_len, NEXTHDR_TCP, th->check);
+      u16 sum2 = csum_tcpudp_remagic(v4saddr, v4daddr, infrag_payload_len, NEXTHDR_TCP, sum1); /* add pseudoheader */
+      if(ul_sum) {
+        *ul_sum = csum16_upd(*ul_sum, th->check, sum2);
+        }
+      th->check = sum2;
+      break;
+      }
+    case NEXTHDR_UDP: {
+      struct udphdr *udp = ptrans_hdr;
+      u16 sum1 = csum_ipv6_unmagic(nat46, &ip6h->saddr, &ip6h->daddr, infrag_payload_len, NEXTHDR_UDP, udp->check);
+      u16 sum2 = csum_tcpudp_remagic(v4saddr, v4daddr, infrag_payload_len, NEXTHDR_UDP, sum1); /* add pseudoheader */
+      if(ul_sum) {
+        *ul_sum = csum16_upd(*ul_sum, udp->check, sum2);
+        }
+      udp->check = sum2;
+      break;
+      }
+    case NEXTHDR_ICMP: {
+      struct icmp6hdr *icmp6h = ptrans_hdr;
+      u16 sum0 = icmp6h->icmp6_cksum;
+      u16 sum1 = csum_ipv6_unmagic(nat46, &ip6h->saddr, &ip6h->daddr, infrag_payload_len, NEXTHDR_ICMP, icmp6h->icmp6_cksum);
+      if(ul_sum) {
+        *ul_sum = csum16_upd(*ul_sum, sum0, sum1);
+        }
+      icmp6h->icmp6_cksum = sum1;
+      proto = IPPROTO_ICMP;
+      switch(icmp6h->icmp6_type) {
+        case ICMPV6_ECHO_REQUEST:
+          update_icmp6_type_code(nat46, icmp6h, ICMP_ECHO, icmp6h->icmp6_code);
+          break;
+        case ICMPV6_ECHO_REPLY:
+          update_icmp6_type_code(nat46, icmp6h, ICMP_ECHOREPLY, icmp6h->icmp6_code);
+          break;
+        default:
+          break;
+      }
+    }
+  }
+
+  fill_v4hdr_from_v6hdr(iph, ip6h, v4saddr, v4daddr, ipid, ipflags, proto, infrag_payload_len);
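+  /*
+   * The embedded header bytes are themselves covered by the outer ICMPv6
+   * checksum: subtract the old IPv6 header words (unchecksum16) and add
+   * the ten 16-bit words of the new IPv4 header (rechecksum16) so the
+   * caller's running sum stays consistent.
+   */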
+  if(ul_sum) {
+    *ul_sum = unchecksum16(pv6, (((u8 *)ptrans_hdr)-((u8 *)pv6))/2, *ul_sum);
+    *ul_sum = rechecksum16(iph, 10, *ul_sum);
+  }
+
+  memmove(((char *)pv6) + IPV4HDRSIZE, get_next_header_ptr6(ip6h, v6_len), v6_len - IPV4HDRSIZE);
+  memcpy(pv6, iph, IPV4HDRSIZE);
+  *ptailTruncSize += IPV6V4HDRDELTA;
+  return (v6_len - IPV6V4HDRDELTA);
+}
+
+u8 *icmp_parameter_ptr(struct icmphdr *icmph) {
+  u8 *icmp_pptr = ((u8 *)(icmph))+4;
+  return icmp_pptr;
+}
+
+u32 *icmp6_parameter_ptr(struct icmp6hdr *icmp6h) {
+  u32 *icmp6_pptr = ((u32 *)(icmp6h))+1;
+  return icmp6_pptr;
+}
+
+static void nat46_fixup_icmp6_dest_unreach(nat46_instance_t *nat46, struct ipv6hdr *ip6h, struct icmp6hdr *icmp6h, struct sk_buff *old_skb, int *ptailTruncSize) {
+  /*
+   * Destination Unreachable (Type 1)  Set the Type to 3, and adjust
+   * the ICMP checksum both to take the type/code change into
+   * account and to exclude the ICMPv6 pseudo-header.
+   *
+   * Translate the Code as follows:
+   *
+   * Code 0 (No route to destination):  Set the Code to 1 (Host
+   *            unreachable).
+   *
+   * Code 1 (Communication with destination administratively
+   *        prohibited):  Set the Code to 10 (Communication with
+   *        destination host administratively prohibited).
+   *
+   * Code 2 (Beyond scope of source address):  Set the Code to 1
+   *        (Host unreachable).  Note that this error is very unlikely
+   *        since an IPv4-translatable source address is typically
+   *        considered to have global scope.
+   *
+   * Code 3 (Address unreachable):  Set the Code to 1 (Host
+   *        unreachable).
+   *
+   * Code 4 (Port unreachable):  Set the Code to 3 (Port
+   *        unreachable).
+   *
+   * Other Code values:  Silently drop.
+   */
+
+  int len;
+
+  switch(icmp6h->icmp6_code) {
+    case 0:
+    case 2:
+    case 3:
+      update_icmp6_type_code(nat46, icmp6h, 3, 1);
+      break;
+    case 1:
+      update_icmp6_type_code(nat46, icmp6h, 3, 10);
+      break;
+    case 4:
+      update_icmp6_type_code(nat46, icmp6h, 3, 3);
+      break;
+    default:
+      ip6h->nexthdr = NEXTHDR_NONE;
+  }
+  len = ntohs(ip6h->payload_len)-sizeof(*icmp6h);
+  len = xlate_payload6_to4(nat46, (icmp6h + 1), get_next_header_ptr6((icmp6h + 1), len), len, &icmp6h->icmp6_cksum, ptailTruncSize);
+}
+
+static void nat46_fixup_icmp6_pkt_toobig(nat46_instance_t *nat46, struct ipv6hdr *ip6h, struct icmp6hdr *icmp6h, struct sk_buff *old_skb, int *ptailTruncSize) {
+  /*
+   * Packet Too Big (Type 2):  Translate to an ICMPv4 Destination
+   * Unreachable (Type 3) with Code 4, and adjust the ICMPv4
+   * checksum both to take the type change into account and to
+   * exclude the ICMPv6 pseudo-header.  The MTU field MUST be
+   * adjusted for the difference between the IPv4 and IPv6 header
+   * sizes, taking into account whether or not the packet in error
+   * includes a Fragment Header, i.e., minimum(advertised MTU-20,
+   * MTU_of_IPv4_nexthop, (MTU_of_IPv6_nexthop)-20).
+   *
+   * See also the requirements in Section 6.
+   *
+   * Section 6 says this for v6->v4 side translation:
+   *
+   * 2.  In the IPv6-to-IPv4 direction:
+   *
+   *        A.  If there is a Fragment Header in the IPv6 packet, the last 16
+   *            bits of its value MUST be used for the IPv4 identification
+   *            value.
+   *
+   *        B.  If there is no Fragment Header in the IPv6 packet:
+   *
+   *            a.  If the packet is less than or equal to 1280 bytes:
+   *
+   *                -  The translator SHOULD set DF to 0 and generate an IPv4
+   *                   identification value.
+   *
+   *                -  To avoid the problems described in [RFC4963], it is
+   *                   RECOMMENDED that the translator maintain 3-tuple state
+   *                   for generating the IPv4 identification value.
+   *
+   *            b.  If the packet is greater than 1280 bytes, the translator
+   *                SHOULD set the IPv4 DF bit to 1.
+   */
+  int len = ntohs(ip6h->payload_len)-sizeof(*icmp6h);
+  u16 *pmtu = ((u16 *)icmp6h) + 3; /* IPv4-compatible MTU value is 16 bit */
+  u16 old_csum = icmp6h->icmp6_cksum;
+
+  if (ntohs(*pmtu) > IPV6V4HDRDELTA) {
+    icmp6h->icmp6_cksum = csum16_upd(old_csum, *pmtu, htons(ntohs(*pmtu) - IPV6V4HDRDELTA));
+    *pmtu = htons(ntohs(*pmtu) - IPV6V4HDRDELTA);
+  }
+
+  len = xlate_payload6_to4(nat46, (icmp6h + 1), get_next_header_ptr6((icmp6h + 1), len), len, &icmp6h->icmp6_cksum, ptailTruncSize);
+
+  update_icmp6_type_code(nat46, icmp6h, 3, 4);
+}
+
+static void nat46_fixup_icmp6_time_exceed(nat46_instance_t *nat46, struct ipv6hdr *ip6h, struct icmp6hdr *icmp6h, struct sk_buff *old_skb, int *ptailTruncSize) {
+  /*
+   * Time Exceeded (Type 3):  Set the Type to 11, and adjust the ICMPv4
+   * checksum both to take the type change into account and to
+   * exclude the ICMPv6 pseudo-header.  The Code is unchanged.
+   */
+  int len = ntohs(ip6h->payload_len)-sizeof(*icmp6h);
+  len = xlate_payload6_to4(nat46, (icmp6h + 1), get_next_header_ptr6((icmp6h + 1), len), len, &icmp6h->icmp6_cksum, ptailTruncSize);
+
+  update_icmp6_type_code(nat46, icmp6h, 11, icmp6h->icmp6_code);
+}
+
+static void nat46_fixup_icmp6_paramprob(nat46_instance_t *nat46, struct ipv6hdr *ip6h, struct icmp6hdr *icmp6h, struct sk_buff *old_skb, int *ptailTruncSize) {
+  /*
+   *         Parameter Problem (Type 4):  Translate the Type and Code as
+   *         follows, and adjust the ICMPv4 checksum both to take the type/
+   *         code change into account and to exclude the ICMPv6 pseudo-
+   *         header.
+   *
+   *         Translate the Code as follows:
+   *
+   *         Code 0 (Erroneous header field encountered):  Set to Type 12,
+   *            Code 0, and update the pointer as defined in Figure 6.  (If
+   *            the Original IPv6 Pointer Value is not listed or the
+   *            Translated IPv4 Pointer Value is listed as "n/a", silently
+   *            drop the packet.)
+   *
+   *         Code 1 (Unrecognized Next Header type encountered):  Translate
+   *            this to an ICMPv4 protocol unreachable (Type 3, Code 2).
+   *
+   *         Code 2 (Unrecognized IPv6 option encountered):  Silently drop.
+   *
+   *      Unknown error messages:  Silently drop.
+   *
+   *     +--------------------------------+--------------------------------+
+   *     |   Original IPv6 Pointer Value  | Translated IPv4 Pointer Value  |
+   *     +--------------------------------+--------------------------------+
+   *     |  0  | Version/Traffic Class    |  0  | Version/IHL, Type Of Ser |
+   *     |  1  | Traffic Class/Flow Label |  1  | Type Of Service          |
+   *     | 2,3 | Flow Label               | n/a |                          |
+   *     | 4,5 | Payload Length           |  2  | Total Length             |
+   *     |  6  | Next Header              |  9  | Protocol                 |
+   *     |  7  | Hop Limit                |  8  | Time to Live             |
+   *     | 8-23| Source Address           | 12  | Source Address           |
+   *     |24-39| Destination Address      | 16  | Destination Address      |
+   *     +--------------------------------+--------------------------------+
+   */
+  static int ptr6_4[] = { 0, 1, -1, -1, 2, 2, 9, 8,
+                          12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12, 12,
+                          16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, 16, -1 };
+  u32 *pptr6 = icmp6_parameter_ptr(icmp6h);
+  u8 *pptr4 = icmp_parameter_ptr((struct icmphdr *)icmp6h);
+  int8_t new_pptr = -1;
+  int len = ntohs(ip6h->payload_len)-sizeof(*icmp6h);
+
+  switch(icmp6h->icmp6_code) {
+    case 1:
+      update_icmp6_type_code(nat46, icmp6h, 3, 2);
+      break;
+    case 0:
+      if(*pptr6 < sizeof(ptr6_4)/sizeof(ptr6_4[0])) {
+        new_pptr = ptr6_4[*pptr6];
+        if (new_pptr >= 0) {
+          icmp6h->icmp6_cksum = csum16_upd(icmp6h->icmp6_cksum, (*pptr6 & 0xffff), (new_pptr << 8));
+          *pptr4 = 0xff & new_pptr;
+          update_icmp6_type_code(nat46, icmp6h, 12, 0);
+          break;
+        }
+      }
+#if __has_attribute(__fallthrough__)
+       __attribute__((__fallthrough__));
+#endif
+    case 2: /* fallthrough to default */
+    default:
+      ip6h->nexthdr = NEXTHDR_NONE;
+      return;
+  }
+
+  len = xlate_payload6_to4(nat46, (icmp6h + 1), get_next_header_ptr6((icmp6h + 1), len), len, &icmp6h->icmp6_cksum, ptailTruncSize);
+}
+
+/* Fixup ICMP6->ICMP before IP header translation, according to http://tools.ietf.org/html/rfc6145 */
+
+static void nat46_fixup_icmp6(nat46_instance_t *nat46, struct ipv6hdr *ip6h, struct icmp6hdr *icmp6h, struct sk_buff *old_skb, int *ptailTruncSize) {
+
+  if(icmp6h->icmp6_type & 128) {
+    /* Informational ICMP */
+    switch(icmp6h->icmp6_type) {
+      case ICMPV6_ECHO_REQUEST:
+        update_icmp6_type_code(nat46, icmp6h, ICMP_ECHO, icmp6h->icmp6_code);
+        break;
+      case ICMPV6_ECHO_REPLY:
+        update_icmp6_type_code(nat46, icmp6h, ICMP_ECHOREPLY, icmp6h->icmp6_code);
+        break;
+      default:
+        ip6h->nexthdr = NEXTHDR_NONE;
+    }
+  } else {
+    /* ICMPv6 errors */
+    switch(icmp6h->icmp6_type) {
+      case ICMPV6_DEST_UNREACH:
+        nat46_fixup_icmp6_dest_unreach(nat46, ip6h, icmp6h, old_skb, ptailTruncSize);
+        break;
+      case ICMPV6_PKT_TOOBIG:
+        nat46_fixup_icmp6_pkt_toobig(nat46, ip6h, icmp6h, old_skb, ptailTruncSize);
+        break;
+      case ICMPV6_TIME_EXCEED:
+        nat46_fixup_icmp6_time_exceed(nat46, ip6h, icmp6h, old_skb, ptailTruncSize);
+        break;
+      case ICMPV6_PARAMPROB:
+        nat46_fixup_icmp6_paramprob(nat46, ip6h, icmp6h, old_skb, ptailTruncSize);
+        break;
+      default:
+        ip6h->nexthdr = NEXTHDR_NONE;
+    }
+  }
+}
+
+int ip6_input_not_interested(nat46_instance_t *nat46, struct ipv6hdr *ip6h, struct sk_buff *old_skb) {
+  if (old_skb->protocol != htons(ETH_P_IPV6)) {
+    nat46debug(3, "Not an IPv6 packet");
+    return 1;
+  }
+  if(old_skb->len < sizeof(struct ipv6hdr) || ip6h->version != 6) {
+    nat46debug(3, "Len short or not correct version: %d", ip6h->version);
+    return 1;
+  }
+  if (!(ipv6_addr_type(&ip6h->saddr) & IPV6_ADDR_UNICAST)) {
+    nat46debug(3, "Source address not unicast");
+    return 1;
+  }
+  return 0;
+}
+
+static uint16_t nat46_fixup_icmp_time_exceeded(nat46_instance_t *nat46, struct iphdr *iph,
+			struct icmphdr *icmph, struct sk_buff *old_skb, uint16_t *sport, uint16_t *dport) {
+  /*
+   * Set the Type to 3, and adjust the
+   * ICMP checksum both to take the type change into account and
+   * to include the ICMPv6 pseudo-header.  The Code is unchanged.
+   */
+  icmph->type = 3;
+  return xlate_pkt_in_err_v4_to_v6(nat46, iph, old_skb, sport, dport);
+}
+
+static uint16_t nat46_fixup_icmp_parameterprob(nat46_instance_t *nat46, struct iphdr *iph,
+			struct icmphdr *icmph, struct sk_buff *old_skb, uint16_t *sport, uint16_t *dport) {
+  /*
+   * Set the Type to 4, and adjust the
+   * ICMP checksum both to take the type/code change into account
+   * and to include the ICMPv6 pseudo-header.
+   *
+   * Translate the Code as follows:
+   *
+   * Code 0 (Pointer indicates the error):  Set the Code to 0
+   * (Erroneous header field encountered) and update the
+   * pointer as defined in Figure 3.  (If the Original IPv4
+   * Pointer Value is not listed or the Translated IPv6
+   * Pointer Value is listed as "n/a", silently drop the
+   * packet.)
+   *
+   * Code 1 (Missing a required option):  Silently drop.
+   *
+   * Code 2 (Bad length):  Set the Code to 0 (Erroneous header
+   * field encountered) and update the pointer as defined in
+   * Figure 3.  (If the Original IPv4 Pointer Value is not
+   * listed or the Translated IPv6 Pointer Value is listed as
+   * "n/a", silently drop the packet.)
+   *
+   *            Other Code values:  Silently drop.
+   *
+   *     +--------------------------------+--------------------------------+
+   *     |   Original IPv4 Pointer Value  | Translated IPv6 Pointer Value  |
+   *     +--------------------------------+--------------------------------+
+   *     |  0  | Version/IHL              |  0  | Version/Traffic Class    |
+   *     |  1  | Type Of Service          |  1  | Traffic Class/Flow Label |
+   *     | 2,3 | Total Length             |  4  | Payload Length           |
+   *     | 4,5 | Identification           | n/a |                          |
+   *     |  6  | Flags/Fragment Offset    | n/a |                          |
+   *     |  7  | Fragment Offset          | n/a |                          |
+   *     |  8  | Time to Live             |  7  | Hop Limit                |
+   *     |  9  | Protocol                 |  6  | Next Header              |
+   *     |10,11| Header Checksum          | n/a |                          |
+   *     |12-15| Source Address           |  8  | Source Address           |
+   *     |16-19| Destination Address      | 24  | Destination Address      |
+   *     +--------------------------------+--------------------------------+
+   */
+  static int ptr4_6[] = { 0, 1, 4, 4, -1, -1, -1, -1, 7, 6, -1, -1, 8, 8, 8, 8, 24, 24, 24, 24, -1 };
+  u8 *icmp_pptr = icmp_parameter_ptr(icmph);
+  u32 *icmp6_pptr = icmp6_parameter_ptr((struct icmp6hdr *)icmph);
+  int8_t new_pptr = -1;
+
+  icmph->type = 4;
+
+  switch (icmph->code) {
+    case 0:
+    case 2:
+      if (*icmp_pptr < (sizeof(ptr4_6)/sizeof(ptr4_6[0]))) {
+        icmph->code = 0;
+        new_pptr = ptr4_6[*icmp_pptr];
+        if (new_pptr >= 0) {
+          *icmp6_pptr = new_pptr;
+          return xlate_pkt_in_err_v4_to_v6(nat46, iph, old_skb, sport, dport);
+        }
+      }
+#if __has_attribute(__fallthrough__)
+      __attribute__((__fallthrough__));
+#endif
+    default:
+      iph->protocol = NEXTHDR_NONE;
+  }
+  return 0;
+}
+
+static uint16_t nat46_fixup_icmp_dest_unreach(nat46_instance_t *nat46, struct iphdr *iph,
+			struct icmphdr *icmph, struct sk_buff *old_skb, uint16_t *sport, uint16_t *dport) {
+  /*
+   *    Translate the Code as
+   *    described below, set the Type to 1, and adjust the ICMP
+   *    checksum both to take the type/code change into account and
+   *    to include the ICMPv6 pseudo-header.
+   *
+   *    Translate the Code as follows:
+   *
+   *    Code 0, 1 (Net Unreachable, Host Unreachable):  Set the Code
+   *       to 0 (No route to destination).
+   *
+   *    Code 2 (Protocol Unreachable):  Translate to an ICMPv6
+   *       Parameter Problem (Type 4, Code 1) and make the Pointer
+   *       point to the IPv6 Next Header field.
+   *
+   *    Code 3 (Port Unreachable):  Set the Code to 4 (Port
+   *       unreachable).
+   *
+   *    Code 4 (Fragmentation Needed and DF was Set):  Translate to
+   *       an ICMPv6 Packet Too Big message (Type 2) with Code set
+   *       to 0.  The MTU field MUST be adjusted for the difference
+   *       between the IPv4 and IPv6 header sizes, i.e.,
+   *       minimum(advertised MTU+20, MTU_of_IPv6_nexthop,
+   *       (MTU_of_IPv4_nexthop)+20).  Note that if the IPv4 router
+   *       set the MTU field to zero, i.e., the router does not
+   *       implement [RFC1191], then the translator MUST use the
+   *       plateau values specified in [RFC1191] to determine a
+   *       likely path MTU and include that path MTU in the ICMPv6
+   *       packet.  (Use the greatest plateau value that is less
+   *       than the returned Total Length field.)
+   *
+   *       See also the requirements in Section 6.
+   *
+   *    Code 5 (Source Route Failed):  Set the Code to 0 (No route
+   *       to destination).  Note that this error is unlikely since
+   *       source routes are not translated.
+   *
+   *    Code 6, 7, 8:  Set the Code to 0 (No route to destination).
+   *
+   *    Code 9, 10 (Communication with Destination Host
+   *       Administratively Prohibited):  Set the Code to 1
+   *       (Communication with destination administratively
+   *       prohibited).
+   *
+   *    Code 11, 12:  Set the Code to 0 (No route to destination).
+   *
+   *    Code 13 (Communication Administratively Prohibited):  Set
+   *       the Code to 1 (Communication with destination
+   *       administratively prohibited).
+   *
+   *    Code 14 (Host Precedence Violation):  Silently drop.
+   *
+   *    Code 15 (Precedence cutoff in effect):  Set the Code to 1
+   *       (Communication with destination administratively
+   *       prohibited).
+   *
+   *    Other Code values:  Silently drop.
+   *
+   */
+
+  u16 *pmtu = ((u16 *)icmph) + 3; /* IPv4-compatible MTU value is 16 bit */
+
+  icmph->type = 1;
+
+  switch (icmph->code) {
+    case 0:
+    case 1:
+      icmph->code = 0;
+      break;
+    case 2: {
+      u32 *icmp6_pptr = icmp6_parameter_ptr((struct icmp6hdr *)icmph);
+      *icmp6_pptr = 6; /* Offset to Next Proto field in IPv6 header. */
+      icmph->type = 4;
+      icmph->code = 1;
+      nat46debug(3, "ICMP Proto Unreachable translated into IPv6 Param Prob.\n");
+      break;
+    }
+    case 3:
+      icmph->code = 4;
+      break;
+    case 4:
+      /*
+       * On adjusting the signaled MTU within packet:
+       *
+       * IPv4 has 20 bytes smaller header size, so, standard says
+       * we can advertise a higher MTU here. However, then we will
+       * need to ensure it does not overshoot our egress link MTU,
+       * which implies knowing the egress interface, which is
+       * not trivial in the current model.
+       *
+       * So, we'd want to leave the MTU as aside. But, the Section 6
+       * has something more to say:
+       *
+       *   1.  In the IPv4-to-IPv6 direction: if the MTU value of ICMPv4 Packet
+       *     Too Big (PTB) messages is less than 1280, change it to 1280.
+       *     This is intended to cause the IPv6 host and IPv6 firewall to
+       *     process the ICMP PTB message and generate subsequent packets to
+       *     this destination with an IPv6 Fragment Header.
+       *
+       */
+      icmph->type = 2;
+      icmph->code = 0;
+      if (ntohs(*pmtu) < 1280) {
+        *pmtu = htons(1280);
+      }
+      break;
+    case 5:
+    case 6:
+    case 7:
+    case 8:
+      icmph->code = 0;
+      break;
+    case 9:
+    case 10:
+      icmph->code = 1;
+      break;
+    case 11:
+    case 12:
+      icmph->code = 0;
+      break;
+    case 13:
+    case 15:
+      icmph->code = 1;
+      break;
+    default:
+      iph->protocol = NEXTHDR_NONE;
+      return 0;
+  }
+  return xlate_pkt_in_err_v4_to_v6(nat46, iph, old_skb, sport, dport);
+}
+
+/* Fixup ICMP->ICMP6 before IP header translation, according to http://tools.ietf.org/html/rfc6145 */
+
+static uint16_t nat46_fixup_icmp(nat46_instance_t *nat46, struct iphdr *iph,
+			struct sk_buff *old_skb, uint16_t *sport, uint16_t *dport) {
+  struct icmphdr *icmph = (struct icmphdr *)(iph+1);
+  uint16_t ret = 0;
+
+  iph->protocol = NEXTHDR_ICMP;
+
+  switch(icmph->type) {
+    case ICMP_ECHO:
+      icmph->type = ICMPV6_ECHO_REQUEST;
+      *sport = *dport = icmph->un.echo.id;
+      nat46debug(3, "ICMP echo request translated into IPv6, id: %d", ntohs(ret));
+      break;
+    case ICMP_ECHOREPLY:
+      icmph->type = ICMPV6_ECHO_REPLY;
+      *sport = *dport = icmph->un.echo.id;
+      nat46debug(3, "ICMP echo reply translated into IPv6, id: %d", ntohs(ret));
+      break;
+    case ICMP_TIME_EXCEEDED:
+      ret = nat46_fixup_icmp_time_exceeded(nat46, iph, icmph, old_skb, sport, dport);
+      break;
+    case ICMP_PARAMETERPROB:
+      ret = nat46_fixup_icmp_parameterprob(nat46, iph, icmph, old_skb, sport, dport);
+      break;
+    case ICMP_DEST_UNREACH:
+      ret = nat46_fixup_icmp_dest_unreach(nat46, iph, icmph, old_skb, sport, dport);
+      break;
+    default:
+      /* Silently drop. */
+      iph->protocol = NEXTHDR_NONE;
+  }
+  return ret;
+}
+
+int pairs_xlate_v6_to_v4_outer(nat46_instance_t *nat46, nat46_xlate_rulepair_t **papair,
+		struct ipv6hdr *ip6h, uint16_t proto, __u32 *pv4saddr, __u32 *pv4daddr) {
+  int ipair = 0;
+  int xlate_src = -1;
+  int xlate_dst = -1;
+  nat46_xlate_rulepair_t *apair;
+
+  apair = nat46_lpm(nat46, NAT46_IPV6_REMOTE, &ip6h->saddr);
+  if (!apair) {
+    return 0;
+  }
+
+  *papair = apair;
+  if (xlate_v6_to_v4(nat46, &apair->local, &ip6h->daddr, pv4daddr)) {
+    nat46debug(5, "Dst addr %pI6 to %pI4 \n", &ip6h->daddr, pv4daddr);
+    xlate_dst = ipair;
+  }
+  if (xlate_v6_to_v4(nat46, &apair->remote, &ip6h->saddr, pv4saddr)) {
+    nat46debug(5, "Src addr %pI6 to %pI4 \n", &ip6h->saddr, pv4saddr);
+    xlate_src = ipair;
+  }
+  if (xlate_dst >= 0) {
+    if (xlate_src < 0) {
+      if (proto == NEXTHDR_ICMP) {
+        nat46debug(1, "[nat46] Could not translate remote address v6->v4, ipair %d, for ICMP6 use dest addr", ipair);
+        *pv4saddr = *pv4daddr;
+        xlate_src = xlate_dst;
+      } else {
+        nat46debug(5, "[nat46] Could not translate remote address v6->v4, ipair %d", ipair);
+      }
+    }
+  } else {
+    nat46debug(1, "[nat46] Could not find a translation pair v6->v4 src %pI6c dst %pI6c", &ip6h->saddr, &ip6h->daddr);
+  }
+  nat46debug(5, "[nat46] pairs_xlate_v6_to_v4_outer result src %d dst %d", xlate_src, xlate_dst);
+  return ( (xlate_src >= 0) && (xlate_dst >= 0) );
+}
+
+int xlate_6_to_4(struct net_device *dev, struct ipv6hdr *ip6h, uint16_t proto, __u32 *pv4saddr, __u32 *pv4daddr) {
+	nat46_xlate_rulepair_t *apair;
+	return pairs_xlate_v6_to_v4_outer(netdev_nat46_instance(dev), &apair, ip6h, proto, pv4saddr, pv4daddr);
+}
+EXPORT_SYMBOL(xlate_6_to_4);
+
+void nat46_ipv6_input(struct sk_buff *old_skb) {
+  struct ipv6hdr *ip6h = ipv6_hdr(old_skb);
+  nat46_xlate_rulepair_t *apair;
+  nat46_instance_t *nat46 = get_nat46_instance(old_skb);
+  uint16_t proto;
+  uint16_t frag_off;
+  uint16_t frag_id;
+
+  struct iphdr * iph;
+  __u32 v4saddr, v4daddr;
+  struct sk_buff *new_skb = NULL;
+  struct sk_buff *reasm_skb = NULL;
+  int truncSize = 0;
+  int tailTruncSize = 0;
+  int v6packet_l3size = sizeof(*ip6h);
+  int l3_infrag_payload_len = ntohs(ip6h->payload_len);
+  int check_for_l4 = 0;
+
+  if (nat46 == NULL) {
+    printk("nat46:%p skb is dropped for no valid instance found\n", old_skb);
+    return;
+  }
+
+  nat46debug(4, "nat46_ipv6_input packet");
+
+  if(ip6_input_not_interested(nat46, ip6h, old_skb)) {
+    nat46debug(1, "nat46_ipv6_input not interested");
+    goto done;
+  }
+  nat46debug(5, "nat46_ipv6_input next hdr: %d, len: %d, is_fragment: %d",
+                ip6h->nexthdr, old_skb->len, ip6h->nexthdr == NEXTHDR_FRAGMENT);
+  proto = ip6h->nexthdr;
+  if (proto == NEXTHDR_FRAGMENT) {
+    struct frag_hdr *fh = (struct frag_hdr*)(ip6h + 1);
+    v6packet_l3size += sizeof(struct frag_hdr);
+    l3_infrag_payload_len -= sizeof(struct frag_hdr);
+    nat46debug(2, "Fragment ID: %08X", fh->identification);
+    nat46debug_dump(nat46, 6, fh, ntohs(ip6h->payload_len));
+
+    if(fh->frag_off == 0) {
+      /* Atomic fragment */
+      proto = fh->nexthdr;
+      frag_off = 0; /* no DF bit */
+      frag_id = fold_ipv6_frag_id(fh->identification);
+      nat46debug(2, "Atomic fragment");
+      check_for_l4 = 1;
+    } else {
+      if (0 == (ntohs(fh->frag_off) & IP6_OFFSET)) {
+        /* First fragment. Pretend business as usual, but when creating IP, set the "MF" bit. */
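+        /*
+         * IPv6 stores the 13-bit fragment offset in the upper bits of
+         * frag_off with the M flag in bit 0; IPv4 keeps 3 flag bits on
+         * top and the offset in the low 13 bits, hence the bit swap below.
+         */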
+        frag_off = htons(((ntohs(fh->frag_off) & 7) << 13) + (((ntohs(fh->frag_off) >> 3) & 0x1FFF)));
+        frag_id = fold_ipv6_frag_id(fh->identification);
+        /* the IPv6 M flag (ntohs(fh->frag_off) & IP6_MF) lands on the IPv4 MF bit above */
+        proto = fh->nexthdr;
+        check_for_l4 = 1;
+        nat46debug(2, "First fragment, frag_off: %04X, frag id: %04X orig frag_off: %04X", ntohs(frag_off), frag_id, ntohs(fh->frag_off));
+      } else {
+        /* Not the first fragment - leave as is, allow to translate IPv6->IPv4 */
+        proto = fh->nexthdr;
+        frag_off = htons(((ntohs(fh->frag_off) & 7) << 13) + (((ntohs(fh->frag_off) >> 3) & 0x1FFF)));
+        frag_id = fold_ipv6_frag_id(fh->identification);
+        nat46debug(2, "Not first fragment, frag_off: %04X, frag id: %04X orig frag_off: %04X", ntohs(frag_off), frag_id, ntohs(fh->frag_off));
+      }
+
+      /* ICMPv6 includes the IPv6 pseudo header in its checksum, but ICMP
+       * does not; the length field of the pseudo header covers the whole
+       * fragmented packet, so we need to gather the fragments into one
+       * packet to get the L3 payload length.
+       */
+      if (proto == NEXTHDR_ICMP) {
+        struct sk_buff *skb = skb_get(old_skb);
+	int err;
+	if (skb == NULL) {
+          goto done;
+	}
+
+        err = nf_ct_frag6_gather(dev_net(old_skb->dev), skb, IP6_DEFRAG_LOCAL_DELIVER);
+
+	/* EINPROGRESS means the skb was queued but reassembly is not finished yet */
+        if (err == -EINPROGRESS) {
+          goto done;
+        }
+
+	reasm_skb = skb;
+	/* Any error other than EINPROGRESS means the skb wasn't queued.
+	 * A return of 0 means all fragments have been gathered and the
+	 * original skb was queued.
+	 */
+        if (err != 0) {
+          goto done;
+        }
+
+        /* Use the reassembly packet as the input */
+        ip6h = ipv6_hdr(reasm_skb);
+        proto = ip6h->nexthdr;
+        v6packet_l3size = sizeof(*ip6h);
+
+	/* No fragment header in the re-assembly packet */
+        frag_off = 0;
+        l3_infrag_payload_len = ntohs(ip6h->payload_len);
+        old_skb = reasm_skb;
+	check_for_l4 = 1;
+      }
+    }
+  } else {
+    frag_off = htons(IP_DF);
+    frag_id = get_next_ip_id();
+    check_for_l4 = 1;
+  }
+
+  if (!pairs_xlate_v6_to_v4_outer(nat46, &apair, ip6h, proto, &v4saddr, &v4daddr)) {
+    if (proto == NEXTHDR_ICMP) {
+      struct icmp6hdr *icmp6h = add_offset(ip6h, v6packet_l3size);
+      struct ipv6hdr *ip6h_inner = (struct ipv6hdr *) (icmp6h + 1);
+      struct ipv6hdr hdr6;
+      switch(icmp6h->icmp6_type) {
+        case ICMPV6_DEST_UNREACH:
+        case ICMPV6_PKT_TOOBIG:
+        case ICMPV6_TIME_EXCEED:
+        case ICMPV6_PARAMPROB:
+          /*
+           * For an ICMPv6 error message, use the addresses of the
+           * embedded (original) message to locate the rule pair one
+           * more time, per RFC 2473, and use the tunnel's IPv4 address
+           * as the source IPv4 address.
+           */
+          memcpy(&hdr6.saddr, &ip6h_inner->daddr, 16);
+          memcpy(&hdr6.daddr, &ip6h_inner->saddr, 16);
+          if (!pairs_xlate_v6_to_v4_outer(nat46, &apair, &hdr6, proto, &v4saddr, &v4daddr)) {
+            if (net_ratelimit()) {
+              nat46debug(0, "[nat46] Could not translate v6->v4");
+            }
+            goto done;
+          }
+          v4saddr = apair->local.v4_pref;
+          break;
+        default:
+          nat46debug(0, "[nat46] Could not translate v6->v4");
+          goto done;
+      }
+    } else {
+      nat46debug(0, "[nat46] Could not translate v6->v4");
+      goto done;
+    }
+  }
+
+  if (check_for_l4) {
+    switch(proto) {
+      /* CHECKSUMS UPDATE */
+      case NEXTHDR_TCP: {
+        struct tcphdr *th = add_offset(ip6h, v6packet_l3size);
+
+	/* The TCP payload length doesn't change, so it needn't be removed from the checksum. */
+        u16 sum1 = csum_ipv6_unmagic(nat46, &ip6h->saddr, &ip6h->daddr, 0, NEXTHDR_TCP, th->check);
+        u16 sum2 = csum_tcpudp_remagic(v4saddr, v4daddr, 0, NEXTHDR_TCP, sum1);
+        th->check = sum2;
+        break;
+        }
+      case NEXTHDR_UDP: {
+        struct udphdr *udp = add_offset(ip6h, v6packet_l3size);
+
+	/* The UDP payload length doesn't change, so it needn't be removed
+	 * from the checksum. A zero UDP checksum means "no checksum", so
+	 * skip the recalculation in that case.
+	 */
+	if (udp->check) {
+          u16 sum1 = csum_ipv6_unmagic(nat46, &ip6h->saddr, &ip6h->daddr, 0, NEXTHDR_UDP, udp->check);
+          u16 sum2 = csum_tcpudp_remagic(v4saddr, v4daddr, 0, NEXTHDR_UDP, sum1);
+          udp->check = sum2;
+	}
+        break;
+        }
+      case NEXTHDR_ICMP: {
+        struct icmp6hdr *icmp6h = add_offset(ip6h, v6packet_l3size);
+
+	/* ICMPv6 includes the IPv6 pseudo header in its checksum, but ICMP
+	 * does not, so remove the whole pseudo header from the checksum.
+	 */
+        u16 sum1 = csum_ipv6_unmagic(nat46, &ip6h->saddr, &ip6h->daddr, l3_infrag_payload_len, NEXTHDR_ICMP, icmp6h->icmp6_cksum);
+        icmp6h->icmp6_cksum = sum1;
+        nat46debug_dump(nat46, 10, icmp6h, l3_infrag_payload_len);
+        nat46_fixup_icmp6(nat46, ip6h, icmp6h, old_skb, &tailTruncSize);
+        proto = IPPROTO_ICMP;
+        break;
+        }
+      default:
+        break;
+    }
+  } else {
+    if(NEXTHDR_ICMP == proto) {
+      proto = IPPROTO_ICMP;
+    }
+  }
+
+  new_skb = skb_copy(old_skb, GFP_ATOMIC); /* atomic: we may be in softirq context */
+  if (!new_skb) {
+    nat46debug(0, "[nat46] Could not copy v6 skb");
+    goto done;
+  }
+
+  /* Remove any debris in the socket control block */
+  memset(IPCB(new_skb), 0, sizeof(struct inet_skb_parm));
+  /* Remove netfilter references to IPv6 packet, new netfilter references will be created based on IPv4 packet */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0)
+  nf_reset(new_skb);
+#else
+  skb_ext_reset(new_skb);
+  nf_reset_ct(new_skb);
+#endif
+
+  /* modify packet: actual IPv6->IPv4 transformation */
+  truncSize = v6packet_l3size - sizeof(struct iphdr); /* chop first 20 bytes */
+  skb_pull(new_skb, truncSize);
+  skb_put(new_skb, -tailTruncSize);
+  l3_infrag_payload_len -= tailTruncSize;
+  skb_reset_network_header(new_skb);
+  skb_set_transport_header(new_skb,IPV4HDRSIZE); /* transport (TCP/UDP/ICMP/...) header starts after 20 bytes */
+
+  /* build IPv4 header */
+  iph = ip_hdr(new_skb);
+  fill_v4hdr_from_v6hdr(iph, ip6h, v4saddr, v4daddr, frag_id, frag_off, proto, l3_infrag_payload_len);
+  new_skb->protocol = htons(ETH_P_IP);
+
+  nat46debug(5, "about to send v4 packet, flags: %02x",  IPCB(new_skb)->flags);
+  nat46_netdev_count_xmit(new_skb, old_skb->dev);
+
+  /* set skb->iif */
+  new_skb->skb_iif = old_skb->skb_iif;
+
+  netif_rx(new_skb);
+
+  /* TBD: should copy be released here? */
+
+done:
+  if (reasm_skb) {
+    kfree_skb(reasm_skb);
+  }
+  release_nat46_instance(nat46);
+}
+
+void ip6_update_csum(struct sk_buff * skb, struct ipv6hdr * ip6hdr, int do_atomic_frag)
+{
+  u32 sum1=0;
+  u16 sum2=0;
+  __sum16 oldsum = 0;
+
+  switch (ip6hdr->nexthdr) {
+    case IPPROTO_TCP: {
+      struct tcphdr *th = tcp_hdr(skb);
+      unsigned tcplen = 0;
+
+      oldsum = th->check;
+      tcplen = ntohs(ip6hdr->payload_len) - (do_atomic_frag?8:0); /* TCP header + payload */
+      th->check = 0;
+      sum1 = csum_partial((char*)th, tcplen, 0); /* calculate checksum for TCP hdr+payload */
+      sum2 = csum_ipv6_magic(&ip6hdr->saddr, &ip6hdr->daddr, tcplen, ip6hdr->nexthdr, sum1); /* add pseudoheader */
+      th->check = sum2;
+      break;
+      }
+    case IPPROTO_UDP: {
+      struct udphdr *udp = udp_hdr(skb);
+      unsigned udplen = ntohs(ip6hdr->payload_len) - (do_atomic_frag?8:0); /* UDP hdr + payload */
+
+      oldsum = udp->check;
+      udp->check = 0;
+
+      sum1 = csum_partial((char*)udp, udplen, 0); /* calculate checksum for UDP hdr+payload */
+      sum2 = csum_ipv6_magic(&ip6hdr->saddr, &ip6hdr->daddr, udplen, ip6hdr->nexthdr, sum1); /* add pseudoheader */
+
+      udp->check = sum2;
+
+      break;
+      }
+    case NEXTHDR_ICMP: {
+      struct icmp6hdr *icmp6h = icmp6_hdr(skb);
+      unsigned icmp6len = 0;
+      icmp6len = ntohs(ip6hdr->payload_len) - (do_atomic_frag?8:0); /* ICMP header + payload */
+      icmp6h->icmp6_cksum = 0;
+      sum1 = csum_partial((char*)icmp6h, icmp6len, 0); /* calculate checksum for ICMPv6 hdr+payload */
+      sum2 = csum_ipv6_magic(&ip6hdr->saddr, &ip6hdr->daddr, icmp6len, ip6hdr->nexthdr, sum1); /* add pseudoheader */
+      icmp6h->icmp6_cksum = sum2;
+      break;
+      }
+    }
+}
+EXPORT_SYMBOL(ip6_update_csum);
+
+int ip4_input_not_interested(nat46_instance_t *nat46, struct iphdr *iph, struct sk_buff *old_skb) {
+  if (old_skb->protocol != htons(ETH_P_IP)) {
+    nat46debug(3, "Not an IPv4 packet");
+    return 1;
+  }
+  // FIXME: check source to be within our prefix
+  return 0;
+}
+
+int pairs_xlate_v4_to_v6_outer(nat46_instance_t *nat46, nat46_xlate_rulepair_t **papair,
+		struct iphdr *hdr4, uint16_t *sport, uint16_t *dport, void *v6saddr, void *v6daddr) {
+  int ipair = 0;
+  int xlate_src = -1;
+  int xlate_dst = -1;
+  int ret = 0;
+
+  nat46_xlate_rulepair_t *apair;
+
+  apair = nat46_lpm(nat46, NAT46_IPV4_REMOTE, &hdr4->daddr);
+  if (!apair) {
+    return 0;
+  }
+  *papair = apair;
+
+  if (xlate_v4_to_v6(nat46, &apair->local, &hdr4->saddr, v6saddr, sport)) {
+    nat46debug(5, "Src addr %pI4 to %pI6 \n", &hdr4->saddr, v6saddr);
+    xlate_src = ipair;
+  }
+  if (xlate_v4_to_v6(nat46, &apair->remote, &hdr4->daddr, v6daddr, dport)) {
+    nat46debug(5, "Dst addr %pI4 to %pI6 \n", &hdr4->daddr, v6daddr);
+    xlate_dst = ipair;
+  }
+  nat46debug(5, "[nat46] pairs_xlate_v4_to_v6_outer result: src %d dst %d", xlate_src, xlate_dst);
+  if ( (xlate_src >= 0) && (xlate_dst >= 0) ) {
+    ret = 1;
+  } else {
+    nat46debug(1, "[nat46] Could not find a translation pair v4->v6");
+  }
+  return ret;
+}
+
+int xlate_4_to_6(struct net_device *dev, struct iphdr *hdr4, uint16_t sport, uint16_t dport, void *v6saddr, void *v6daddr) {
+	nat46_xlate_rulepair_t *apair;
+	return pairs_xlate_v4_to_v6_outer(netdev_nat46_instance(dev), &apair, hdr4, &sport, &dport, v6saddr, v6daddr);
+}
+EXPORT_SYMBOL(xlate_4_to_6);
+
+/*
+ * The sport & dport in inner header will be dport & sport of the outer header, respectively.
+ * Hence, dest. and source ips of inner header will be found in local & remote rules, respectively.
+ */
+int pairs_xlate_v4_to_v6_inner(nat46_instance_t *nat46, struct iphdr *iph,
+		uint16_t sport, uint16_t dport, void *v6saddr, void *v6daddr) {
+	int ipair = 0;
+	nat46_xlate_rulepair_t *apair = NULL;
+	int xlate_src = -1;
+	int xlate_dst = -1;
+
+	apair = nat46_lpm(nat46, NAT46_IPV4_REMOTE, &iph->saddr);
+	if (!apair) {
+		return 0;
+	}
+
+	if (xlate_v4_to_v6(nat46, &apair->local, &iph->daddr, v6daddr, &dport)) {
+		nat46debug(3, "Dst addr %pI4 to %pI6 \n", &iph->daddr, v6daddr);
+		xlate_dst = ipair;
+	}
+	if (xlate_v4_to_v6(nat46, &apair->remote, &iph->saddr, v6saddr, &sport)) {
+		nat46debug(3, "Src addr %pI4 to %pI6 \n", &iph->saddr, v6saddr);
+		xlate_src = ipair;
+	}
+	if ((xlate_src >= 0) && (xlate_dst >= 0)) {
+		/* we did manage to translate it */
+		nat46debug(5, "[nat46] Inner header xlate results: src %d dst %d", xlate_src, xlate_dst);
+		return 1;
+	} else {
+		nat46debug(1, "[nat46] Could not find a translation pair v4->v6");
+	}
+
+	return 0;
+}
+
+static uint16_t xlate_pkt_in_err_v4_to_v6(nat46_instance_t *nat46, struct iphdr *iph,
+					struct sk_buff *old_skb, uint16_t *sport, uint16_t *dport) {
+	struct ipv6hdr ip6h;
+	char v6saddr[16], v6daddr[16];
+	uint16_t temp_port = 0;
+	int ret = 0;
+	struct icmphdr *icmph = (struct icmphdr *)(iph + 1);
+	struct iphdr *iiph = (struct iphdr *)(icmph + 1);
+
+	switch (iiph->protocol) {
+	case IPPROTO_TCP: {
+		struct tcphdr *th = (struct tcphdr *)(iiph + 1);
+		*sport = th->source;
+		*dport = th->dest;
+		iiph->protocol = NEXTHDR_TCP;
+		break;
+	}
+	case IPPROTO_UDP: {
+		struct udphdr *udp = (struct udphdr *)(iiph + 1);
+		*sport = udp->source;
+		*dport = udp->dest;
+		iiph->protocol = NEXTHDR_UDP;
+		break;
+	}
+	case IPPROTO_ICMP: {
+		struct icmphdr *icmph = (struct icmphdr *)(iiph + 1);
+		iiph->protocol = NEXTHDR_ICMP;
+		switch (icmph->type) {
+		case ICMP_ECHO:
+			icmph->type = ICMPV6_ECHO_REQUEST;
+			*sport = *dport = icmph->un.echo.id;
+			break;
+		case ICMP_ECHOREPLY:
+			icmph->type = ICMPV6_ECHO_REPLY;
+			*sport = *dport = icmph->un.echo.id;
+			break;
+		default:
+			nat46debug(3, "An ICMP error message can't be embedded inside another ICMP error message.");
+			*sport = *dport = 0;
+			return 0;
+		}
+		break;
+	}
+	default:
+		nat46debug(3, "[ICMPv4] Next header: %u. Only TCP, UDP, and ICMP are supported.", iiph->protocol);
+		*sport = *dport = 0;
+		return 0;
+	}
+
+	nat46debug(3, "Retrieved from packet in error: dest port %d, src port %d.", ntohs(*dport), ntohs(*sport));
+
+	if (!pairs_xlate_v4_to_v6_inner(nat46, iiph, *sport, *dport, v6saddr, v6daddr)) {
+		nat46debug(0, "[nat46] Could not translate inner header v4->v6");
+		*sport = *dport = 0;
+		return 0;
+	}
+
+	fill_v6hdr_from_v4hdr(iiph, &ip6h);
+	memcpy(&ip6h.saddr, v6saddr, sizeof(ip6h.saddr));
+	memcpy(&ip6h.daddr, v6daddr, sizeof(ip6h.daddr));
+
+	if (skb_tailroom(old_skb) >= IPV6V4HDRDELTA){
+		skb_put(old_skb, IPV6V4HDRDELTA);
+		/* An ICMP error message is at most 576 bytes, so the inner IPv4 packet may be trimmed */
+		memmove(((char *)iiph + IPV6HDRSIZE), (iiph + 1),
+		ntohs(iph->tot_len) - 2 * IPV4HDRSIZE - sizeof(struct icmphdr));
+		memcpy(iiph, &ip6h, IPV6HDRSIZE);
+	}
+	else {
+		ret = pskb_expand_head(old_skb, 0, IPV6V4HDRDELTA, GFP_ATOMIC);
+		if (unlikely(ret)) {
+			nat46debug(0, "[nat46] Could not copy v4 skb");
+			*sport = *dport = 0;
+			return 0;
+		}
+
+		skb_put(old_skb, IPV6V4HDRDELTA);
+		iiph = (struct iphdr *)(icmp_hdr(old_skb) + 1);
+		/* An ICMP error message is at most 576 bytes, so the inner IPv4 packet may be trimmed */
+		memmove(((char *)iiph + IPV6HDRSIZE), (iiph + 1),
+		ntohs(iph->tot_len) - 2 * IPV4HDRSIZE - sizeof(struct icmphdr));
+		memcpy(iiph, &ip6h, IPV6HDRSIZE);
+	}
+	iph->tot_len = htons(ntohs(iph->tot_len) + IPV6V4HDRDELTA);
+
+	/* Swap ports for the outer header. */
+	/* Another work-around until proper LPM handling makes it unnecessary. */
+	temp_port = *sport;
+	*sport = *dport;
+	*dport = temp_port;
+
+	return 1;
+}
+
+/* Return the port number from CE's port set */
+static uint16_t nat46_get_ce_port(nat46_xlate_rulepair_t *pair, uint16_t sport)
+{
+	/*
+	 * 'psid_bits_len' represents number of bits in PSID.
+	 * 'offset' represents offset of PSID in a port number.
+	 */
+	uint8_t psid_bits_len, offset, port_set_bitmask;
+
+	/*
+	 * 'psid16' represents the PSID value.
+	 * 'm' represents the number of bits in the excluded port set.
+	 * 'a' represents the number of bits in a 16-bit port number after
+	 *     the PSID. It controls the number of ports in one contiguous
+	 *     port set.
+	 *
+	 * The variable names 'a' and 'm' follow Appendix B of [RFC7597].
+	 */
+	uint16_t psid16, value, m, a;
+	nat46_xlate_rule_t *rule;
+
+	/* stores the last port number handed out from the CE's port set */
+	static uint16_t port_num;
+
+	rule = &pair->local;
+	offset = rule->psid_offset;
+
+	if (rule->ea_len + rule->v4_pref_len > IPV4_BITS_MAX) {
+		psid_bits_len = rule->ea_len - (IPV4_BITS_MAX - rule->v4_pref_len);
+	} else {
+		return 0;
+	}
+	a = PSID_LEN_MAX - offset - psid_bits_len;
+	psid16 = (ntohs(sport) >> a) & (0xffff >> (PSID_LEN_MAX - psid_bits_len));
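+
+	/*
+	 * Worked example (hypothetical values): with psid_offset=6 and
+	 * psid_bits_len=8, a = 16 - 6 - 8 = 2, so each PSID owns contiguous
+	 * runs of 2^2 = 4 ports, and sport 0x1234 gives
+	 * psid16 = (0x1234 >> 2) & 0xff = 0x8d.
+	 */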
+
+	spin_lock(&port_id_lock);
+
+	/* Start case */
+	if (0 == port_num) {
+		m = (offset) ? 1 : 0;
+		port_num = (m << (PSID_LEN_MAX - offset)) | (psid16 << a);
+		value = port_num;
+		spin_unlock(&port_id_lock);
+		return value;
+	}
+
+	/* End of one port set */
+	port_set_bitmask = (1 << a) - 1;
+	value = port_num & port_set_bitmask;
+	if (0 == (value ^ port_set_bitmask)) {
+		m = port_num >> (PSID_LEN_MAX - offset);
+		m++;
+		/* End case */
+		if (m >= (1 << offset)) {
+			m = (offset) ? 1 : 0;
+		}
+		port_num = (m << (PSID_LEN_MAX - offset)) | (psid16 << a);
+		value = port_num;
+		spin_unlock(&port_id_lock);
+		return value;
+	}
+
+	port_num++;
+	value = port_num;
+	spin_unlock(&port_id_lock);
+	return value;
+}
+
+void nat46_ipv4_input(struct sk_buff *old_skb) {
+  nat46_instance_t *nat46 = get_nat46_instance(old_skb);
+  nat46_xlate_rulepair_t *apair = NULL;
+  struct sk_buff *new_skb;
+  uint16_t sport = 0, dport = 0, ret = 0;
+
+  uint8_t tclass;
+  int flowlabel = 0;
+  int check_for_l4 = 0;
+  int having_l4 = 0;
+  int add_frag_header = 0;
+
+  struct ipv6hdr * hdr6;
+  struct iphdr * hdr4 = ip_hdr(old_skb);
+
+  char v6saddr[16], v6daddr[16];
+
+  if (nat46 == NULL) {
+    printk("nat46:%p skb is dropped for no valid instance found\n", old_skb);
+    return;
+  }
+
+  tclass = hdr4->tos;
+
+  memset(v6saddr, 1, 16);
+  memset(v6daddr, 2, 16);
+
+  if (ip4_input_not_interested(nat46, hdr4, old_skb)) {
+    goto done;
+  }
+  nat46debug(1, "nat46_ipv4_input packet");
+  nat46debug(5, "nat46_ipv4_input protocol: %d, len: %d, flags: %02x", hdr4->protocol, old_skb->len, IPCB(old_skb)->flags);
+  if(0 == (ntohs(hdr4->frag_off) & 0x3FFF) ) { /* not a fragment: MF and offset both zero */
+    check_for_l4 = 1;
+    if (0 == (ntohs(hdr4->frag_off) & IP_DF)) {
+      if (add_dummy_header) {
+        add_frag_header = 1;
+      }
+      old_skb->ignore_df = 1;
+    }
+  } else {
+    add_frag_header = 1;
+    if (0 == (ntohs(hdr4->frag_off) & 0x1FFF)) { /* first fragment: offset is zero */
+      check_for_l4 = 1;
+    }
+  }
+
+  if (check_for_l4) {
+    switch(hdr4->protocol) {
+      case IPPROTO_TCP: {
+	struct tcphdr *th = tcp_hdr(old_skb);
+	sport = th->source;
+	dport = th->dest;
+	having_l4 = 1;
+	break;
+	}
+      case IPPROTO_UDP: {
+	struct udphdr *udp = udp_hdr(old_skb);
+	sport = udp->source;
+	dport = udp->dest;
+	having_l4 = 1;
+	break;
+	}
+      case IPPROTO_ICMP:
+        ret = nat46_fixup_icmp(nat46, hdr4, old_skb, &sport, &dport);
+        nat46debug(3, "ICMP translated to dest port %d, and src port %d.", ntohs(dport), ntohs(sport));
+        having_l4 = 1;
+        break;
+      default:
+	break;
+    }
+  } else {
+    if (IPPROTO_ICMP == hdr4->protocol) {
+      hdr4->protocol = NEXTHDR_ICMP;
+    }
+    dport = 0;
+    sport = 0;
+    having_l4 = 1;
+  }
+
+  if(!pairs_xlate_v4_to_v6_outer(nat46, &apair, hdr4, having_l4 ? &sport : NULL, having_l4 ? &dport : NULL, v6saddr, v6daddr)) {
+    if (net_ratelimit()) {
+      nat46debug(0, "[nat46] Could not translate v4->v6");
+    }
+    goto done;
+  }
+
+  new_skb = skb_copy(old_skb, GFP_ATOMIC);
+  if (!new_skb) {
+    nat46debug(0, "[nat46] Could not copy v4 skb");
+    goto done;
+  }
+
+  /* Remove any debris in the socket control block */
+  memset(IPCB(new_skb), 0, sizeof(struct inet_skb_parm));
+  /* Remove netfilter references to IPv4 packet, new netfilter references will be created based on IPv6 packet */
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0)
+  nf_reset(new_skb);
+#else
+  skb_ext_reset(new_skb);
+  nf_reset_ct(new_skb);
+#endif
+
+  /* expand headroom (IPV6V4HDRDELTA bytes, plus 8 for a fragment header if needed) */
+  if (pskb_expand_head(new_skb, IPV6V4HDRDELTA + (add_frag_header?8:0), 0, GFP_ATOMIC)) {
+    nat46debug(0, "[nat46] Could not expand v4 skb headroom");
+    kfree_skb(new_skb);
+    goto done;
+  }
+
+  skb_push(new_skb, IPV6V4HDRDELTA + (add_frag_header?8:0)); /* push boundary by the same amount */
+
+  skb_reset_network_header(new_skb);
+  skb_set_transport_header(new_skb, IPV6HDRSIZE + (add_frag_header?8:0) ); /* transport (TCP/UDP/ICMP/...) header starts after 40 bytes */
+
+  hdr6 = ipv6_hdr(new_skb);
+  memset(hdr6, 0, sizeof(*hdr6) + (add_frag_header?8:0));
+
+  /* build IPv6 header */
+  *(__be32 *)hdr6 = htonl(0x60000000 | (tclass << 20)) | flowlabel; /* version, priority, flowlabel */
+
+  /* IPv6 length is a payload length, IPv4 is hdr+payload */
+  hdr6->payload_len = htons(ntohs(hdr4->tot_len) - sizeof(struct iphdr) + (add_frag_header?8:0));
+  hdr6->nexthdr = hdr4->protocol;
+  hdr6->hop_limit = hdr4->ttl;
+  memcpy(&hdr6->saddr, v6saddr, 16);
+  memcpy(&hdr6->daddr, v6daddr, 16);
+
+  new_skb->protocol = htons(ETH_P_IPV6);
+
+  if (add_frag_header) {
+    struct frag_hdr *fh = (struct frag_hdr*)(hdr6 + 1);
+    uint16_t ce_port_num = 0;
+
+    /* Flag to represent whether PSID is assigned to MAP-T node or not */
+    bool is_psid = false;
+
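+    /*
+     * Reverse of the v6->v4 direction: IPv4 keeps its 3 flag bits above
+     * a 13-bit offset, while IPv6 stores the offset in the upper 13 bits
+     * with the M flag in bit 0, hence the bit swap below.
+     */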
+    fh->frag_off = htons(((ntohs(hdr4->frag_off) >> 13) & 7) + ((ntohs(hdr4->frag_off) & 0x1FFF) << 3));
+    fh->nexthdr = hdr4->protocol;
+
+    /*
+     * PSID assigned MAP-T node will have non-zero ea_len and we are currently
+     * only supporting NAT46_XLATE_MAP as the CE's rule style.
+     */
+    is_psid = (apair->local.style == NAT46_XLATE_MAP) && apair->local.ea_len;
+    if (is_psid) {
+      ce_port_num = nat46_get_ce_port(nat46->pairs, sport);
+      nat46debug(10, "ce port number is %04x", ce_port_num);
+
+      /* Assign CE's port number as the fragment identifier */
+      if (ce_port_num) {
+        fh->identification = htonl(ce_port_num);
+      } else {
+        fh->identification = htonl(ntohs(hdr4->id));
+      }
+    } else {
+      fh->identification = htonl(ntohs(hdr4->id));
+    }
+
+  }
+  ip6_update_csum(new_skb, hdr6, add_frag_header);
+
+  hdr6->nexthdr = add_frag_header ? NEXTHDR_FRAGMENT : hdr4->protocol;
+
+  // FIXME: check if you can not fit the packet into the cached MTU
+  // if (dst_mtu(skb_dst(new_skb))==0) { }
+
+  nat46debug(5, "about to send v6 packet, flags: %02x",  IPCB(new_skb)->flags);
+  nat46_netdev_count_xmit(new_skb, old_skb->dev);
+
+  /* set skb->iif */
+  new_skb->skb_iif = old_skb->skb_iif;
+
+  netif_rx(new_skb);
+
+done:
+  release_nat46_instance(nat46);
+}
+
+int nat46_get_npairs(struct net_device *dev) {
+	nat46_instance_t *nat46 = netdev_nat46_instance(dev);
+	return nat46->npairs;
+}
+EXPORT_SYMBOL(nat46_get_npairs);
+
+bool nat46_get_rule_config(struct net_device *dev, nat46_xlate_rulepair_t **nat46_rule_pair, int *count) {
+	nat46_instance_t *nat46 = netdev_nat46_instance(dev);
+	if (nat46->npairs < 1) {
+		/*
+		 * no rules ?
+		 */
+		return false;
+	}
+	*count = nat46->npairs;
+	*nat46_rule_pair = nat46->pairs;
+	return true;
+}
+EXPORT_SYMBOL(nat46_get_rule_config);
+
+/*
+ * Function to get MAP-T rules and flags.
+ */
+bool nat46_get_info(struct net_device *dev, nat46_xlate_rulepair_t **nat46_rule_pair,
+		 int *count, u8 *flag) {
+	if ((!dev) || (!nat46_rule_pair) || (!count) || (!flag)) {
+		return false;
+	}
+
+	if (!nat46_get_rule_config(dev, nat46_rule_pair, count)) {
+		return false;
+	}
+
+	/* Check add dummy header flag */
+	if (add_dummy_header) {
+		*flag = ADD_DUMMY_HEADER;
+	}
+	return true;
+}
+EXPORT_SYMBOL(nat46_get_info);
diff --git a/nat46/nat46/modules/nat46-core.h b/nat46/nat46/modules/nat46-core.h
new file mode 100644
index 0000000..fd72daf
--- /dev/null
+++ b/nat46/nat46/modules/nat46-core.h
@@ -0,0 +1,119 @@
+/*
+ * NAT46 core definitions
+ *
+ * Copyright (c) 2013-2014 Andrew Yourtchenko <ayourtch@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __NAT46_CORE_H__
+#define __NAT46_CORE_H__
+
+#include "nat46-glue.h"
+
+// #define nat46debug(level, format, ...) debug(DBG_V6, level, format, __VA_ARGS__)
+// #define nat46debug(level, format, ...)
+#define nat46debug(level, format, ...) do { if(nat46->debug >= level) { printk(format "\n", ##__VA_ARGS__); } } while (0)
+
+#define U8_MASK (uint8_t)(0xFF)
+#define U32_MASK (uint32_t)(~0U)
+#define BITS_PER_BYTE 8
+#define PSID_LEN_MAX 16
+#define NUM_RULE_PAIRS_MAX 32
+#define IPV4_BITS_MAX 32
+#define EA_LEN_MAX 48
+#define IPV6_BITS_MAX 128
+
+/* Flag definitions for MAP-T */
+#define ADD_DUMMY_HEADER 0x01
+
+#define IPV6HDRSIZE 40
+#define IPV4HDRSIZE 20
+#define IPV6V4HDRDELTA (IPV6HDRSIZE - IPV4HDRSIZE)
+
+/* 
+ * A generic v4<->v6 translation structure.
+ * The currently supported translation styles:
+ */
+
+typedef enum {
+  NAT46_XLATE_NONE = 0,
+  NAT46_XLATE_MAP,
+  NAT46_XLATE_MAP0,
+  NAT46_XLATE_RFC6052
+} nat46_xlate_style_t;
+
+/*
+ * Enumeration for sorting pairs based on
+ * type of prefix length.
+ */
+typedef enum {
+  NAT46_IPV4_LOCAL = 0,
+  NAT46_IPV4_REMOTE,
+  NAT46_IPV6_LOCAL,
+  NAT46_IPV6_REMOTE
+} nat46_rule_type_t;
+
+#define NAT46_SIGNATURE 0x544e3634
+#define FREED_NAT46_SIGNATURE 0xdead544e
+
+typedef struct nat46_xlate_rule {
+  nat46_xlate_style_t style;
+  struct in6_addr v6_pref;
+  int v6_pref_len;
+  u32 v4_pref;
+  int v4_pref_len;
+  int ea_len;
+  int psid_offset;
+  int fmr_flag;
+} nat46_xlate_rule_t;
+
+typedef struct nat46_xlate_rulepair {
+  nat46_xlate_rule_t local;
+  nat46_xlate_rule_t remote;
+} nat46_xlate_rulepair_t;
+
+typedef struct {
+  u32 sig; /* nat46 signature */
+  int refcount;
+  int debug;
+
+  int npairs;
+  nat46_xlate_rulepair_t pairs[NUM_RULE_PAIRS_MAX]; /* npairs */
+  nat46_xlate_rulepair_t sorted_ipv4_local_pairs[NUM_RULE_PAIRS_MAX]; /* npairs */
+  nat46_xlate_rulepair_t sorted_ipv4_remote_pairs[NUM_RULE_PAIRS_MAX]; /* npairs */
+  nat46_xlate_rulepair_t sorted_ipv6_local_pairs[NUM_RULE_PAIRS_MAX]; /* npairs */
+  nat46_xlate_rulepair_t sorted_ipv6_remote_pairs[NUM_RULE_PAIRS_MAX]; /* npairs */
+} nat46_instance_t;
+
+void nat46_ipv6_input(struct sk_buff *old_skb);
+void nat46_ipv4_input(struct sk_buff *old_skb);
+
+int nat46_set_ipair_config(nat46_instance_t *nat46, int ipair, char *buf, int count);
+int nat46_set_config(nat46_instance_t *nat46, char *buf, int count);
+
+int nat46_get_ipair_config(nat46_instance_t *nat46, int ipair, char *buf, int count);
+int nat46_get_config(nat46_instance_t *nat46, char *buf, int count);
+
+char *get_next_arg(char **ptail);
+nat46_instance_t *get_nat46_instance(struct sk_buff *sk);
+
+nat46_instance_t *alloc_nat46_instance(int npairs, nat46_instance_t *old, int from_ipair, int to_ipair);
+void release_nat46_instance(nat46_instance_t *nat46);
+
+int xlate_6_to_4(struct net_device *dev, struct ipv6hdr *ip6h, uint16_t proto, __u32 *pv4saddr, __u32 *pv4daddr);
+int xlate_4_to_6(struct net_device *dev, struct iphdr *hdr4, uint16_t sport, uint16_t dport, void *v6saddr, void *v6daddr);
+void ip6_update_csum(struct sk_buff * skb, struct ipv6hdr * ip6hdr, int do_atomic_frag);
+bool nat46_get_rule_config(struct net_device *dev, nat46_xlate_rulepair_t **nat46_rule_pair, int *count);
+int nat46_get_npairs(struct net_device *dev);
+bool nat46_get_info(struct net_device *dev, nat46_xlate_rulepair_t **nat46_rule_pair,
+		int *count, u8 *flag);
+#endif
diff --git a/nat46/nat46/modules/nat46-glue.c b/nat46/nat46/modules/nat46-glue.c
new file mode 100644
index 0000000..06751fb
--- /dev/null
+++ b/nat46/nat46/modules/nat46-glue.c
@@ -0,0 +1,71 @@
+/*
+ * glue functions, candidates to go to -core
+ *
+ * Copyright (c) 2013-2014 Andrew Yourtchenko <ayourtch@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+
+#include "nat46-glue.h"
+#include "nat46-core.h"
+
+static DEFINE_MUTEX(ref_lock);
+int is_valid_nat46(nat46_instance_t *nat46) {
+  return (nat46 && (nat46->sig == NAT46_SIGNATURE));
+}
+
+nat46_instance_t *alloc_nat46_instance(int npairs, nat46_instance_t *old, int from_ipair, int to_ipair) {
+  nat46_instance_t *nat46 = kzalloc(sizeof(nat46_instance_t) + npairs*sizeof(nat46_xlate_rulepair_t), GFP_KERNEL);
+  if (!nat46) {
+    printk("[nat46] make_nat46_instance: can not alloc a nat46 instance with %d pairs\n", npairs);
+    return NULL;
+  } else {
+    printk("[nat46] make_nat46_instance: allocated nat46 instance with %d pairs\n", npairs);
+  }
+  nat46->sig = NAT46_SIGNATURE;
+  nat46->npairs = npairs;
+  nat46->refcount = 1; /* The caller gets the reference */
+  if (old) {
+    nat46->debug = old->debug;
+    for(; (from_ipair >= 0) && (to_ipair >= 0) &&
+          (from_ipair < old->npairs) && (to_ipair < nat46->npairs); from_ipair++, to_ipair++) {
+      nat46->pairs[to_ipair] = old->pairs[from_ipair];
+    }
+  }
+  return nat46;
+}
+
+
+nat46_instance_t *get_nat46_instance(struct sk_buff *sk) {
+  nat46_instance_t *nat46 = netdev_nat46_instance(sk->dev);
+  mutex_lock(&ref_lock);
+  if (is_valid_nat46(nat46)) {
+    nat46->refcount++;
+    mutex_unlock(&ref_lock);
+    return nat46;
+  } else {
+    mutex_unlock(&ref_lock);
+    printk("[nat46] get_nat46_instance: Could not find a valid NAT46 instance!");
+    return NULL;
+  }
+}
+
+void release_nat46_instance(nat46_instance_t *nat46) {
+  mutex_lock(&ref_lock);
+  nat46->refcount--;
+  if(0 == nat46->refcount) {
+    nat46->sig = FREED_NAT46_SIGNATURE;
+    printk("[nat46] release_nat46_instance: freeing nat46 instance with %d pairs\n", nat46->npairs);
+    kfree(nat46);
+  }
+  mutex_unlock(&ref_lock);
+}
diff --git a/nat46/nat46/modules/nat46-glue.h b/nat46/nat46/modules/nat46-glue.h
new file mode 100644
index 0000000..bbd71f5
--- /dev/null
+++ b/nat46/nat46/modules/nat46-glue.h
@@ -0,0 +1,32 @@
+/*
+ * Glue headers, not much here.
+ *
+ * Copyright (c) 2013-2014 Andrew Yourtchenko <ayourtch@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <net/ip.h>
+#include <net/ipv6.h>
+#include <linux/icmp.h>
+#include <linux/skbuff.h>
+#include <net/ip6_route.h>
+#include <linux/inet.h>
+#include <net/ip6_checksum.h>
+#include "nat46-netdev.h"
+
+
+#ifndef IP6_OFFSET
+#define IP6_OFFSET      0xFFF8
+#endif
+
+#define assert(x) do { if (!(x)) printk("Assertion failed: %s\n", #x); } while (0)
+
diff --git a/nat46/nat46/modules/nat46-module.c b/nat46/nat46/modules/nat46-module.c
new file mode 100644
index 0000000..fdb20ba
--- /dev/null
+++ b/nat46/nat46/modules/nat46-module.c
@@ -0,0 +1,200 @@
+/*
+ *
+ * module-wide functions, mostly boilerplate
+ *
+ * Copyright (c) 2013-2014 Andrew Yourtchenko <ayourtch@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/ip.h>
+#include <linux/icmp.h>
+#include <linux/inet.h>
+#include <linux/icmpv6.h>
+#include <linux/inetdevice.h>
+#include <linux/types.h>
+#include <linux/netfilter_ipv4.h>
+
+
+#include <linux/fs.h>           // for basic filesystem
+#include <linux/proc_fs.h>      // for the proc filesystem
+#include <linux/seq_file.h>     // for sequence files
+
+#include <net/ip.h>
+#include <net/tcp.h>
+#include <net/udp.h>
+#include <net/icmp.h>
+#include <net/route.h>
+#include <net/ip6_route.h>
+
+#include <net/ipv6.h>
+
+#include "nat46-core.h"
+#include "nat46-netdev.h"
+
+#define NAT46_PROC_NAME	"nat46"
+#define NAT46_CONTROL_PROC_NAME "control"
+
+#ifndef NAT46_VERSION
+#define NAT46_VERSION __DATE__ " " __TIME__
+#endif
+
+MODULE_LICENSE("GPL");
+MODULE_AUTHOR("Andrew Yourtchenko <ayourtch@gmail.com>");
+MODULE_DESCRIPTION("NAT46 stateless translation");
+
+int debug = 0;
+module_param(debug, int, 0);
+MODULE_PARM_DESC(debug, "debugging messages level (default=0)");
+bool add_dummy_header = false;
+module_param(add_dummy_header, bool, 0);
+MODULE_PARM_DESC(add_dummy_header, "Add dummy fragment header");
+
+static DEFINE_MUTEX(add_del_lock);
+static struct proc_dir_entry *nat46_proc_entry;
+static struct proc_dir_entry *nat46_proc_parent;
+
+
+static int nat46_proc_show(struct seq_file *m, void *v)
+{
+	nat64_show_all_configs(m);
+	return 0;
+}
+
+
+static int nat46_proc_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, nat46_proc_show, NULL);
+}
+
+static char *get_devname(char **ptail)
+{
+	const int maxlen = IFNAMSIZ-1;
+	char *devname = get_next_arg(ptail);
+	if(strlen(devname) > maxlen) {
+		printk(KERN_INFO "nat46: '%s' is "
+			"longer than %d chars, truncating\n", devname, maxlen);
+		devname[maxlen] = 0;
+	}
+	return devname;
+}
+
+static ssize_t nat46_proc_write(struct file *file, const char __user *buffer,
+                              size_t count, loff_t *ppos)
+{
+	char *buf = NULL;
+	char *tail = NULL;
+	char *devname = NULL;
+	char *arg_name = NULL;
+
+	buf = kmalloc(sizeof(char) * (count + 1), GFP_KERNEL);
+	if (!buf)
+		return -ENOMEM;
+
+	if (copy_from_user(buf, buffer, count)) {
+		kfree(buf);
+		return -EFAULT;
+	}
+	tail = buf;
+	buf[count] = '\0';
+	if( (count > 0) && (buf[count-1] == '\n') ) {
+		buf[count-1] = '\0';
+	}
+
+	while (NULL != (arg_name = get_next_arg(&tail))) {
+		if (0 == strcmp(arg_name, "add")) {
+			devname = get_devname(&tail);
+			printk(KERN_INFO "nat46: adding device (%s)\n", devname);
+			mutex_lock(&add_del_lock);
+			nat46_create(devname);
+			mutex_unlock(&add_del_lock);
+		} else if (0 == strcmp(arg_name, "del")) {
+			devname = get_devname(&tail);
+			printk(KERN_INFO "nat46: deleting device (%s)\n", devname);
+			mutex_lock(&add_del_lock);
+			nat46_destroy(devname);
+			mutex_unlock(&add_del_lock);
+		} else if (0 == strcmp(arg_name, "config")) {
+			devname = get_devname(&tail);
+			printk(KERN_INFO "nat46: configure device (%s) with '%s'\n", devname, tail);
+			mutex_lock(&add_del_lock);
+			nat46_configure(devname, tail);
+			mutex_unlock(&add_del_lock);
+		} else if (0 == strcmp(arg_name, "insert")) {
+			devname = get_devname(&tail);
+			printk(KERN_INFO "nat46: insert new rule into device (%s) with '%s'\n", devname, tail);
+			mutex_lock(&add_del_lock);
+			nat46_insert(devname, tail);
+			mutex_unlock(&add_del_lock);
+		}
+	}
+
+	kfree(buf);
+	return count;
+}
+
+static const struct file_operations nat46_proc_fops = {
+	.owner		= THIS_MODULE,
+	.open		= nat46_proc_open,
+	.read		= seq_read,
+	.llseek		= seq_lseek,
+	.release	= single_release,
+	.write		= nat46_proc_write,
+};
+
+
+int create_nat46_proc_entry(void) {
+	nat46_proc_parent = proc_mkdir(NAT46_PROC_NAME, init_net.proc_net);
+	if (nat46_proc_parent) {
+		nat46_proc_entry = proc_create(NAT46_CONTROL_PROC_NAME, 0644, nat46_proc_parent, &nat46_proc_fops );
+		if(!nat46_proc_entry) {
+			printk(KERN_INFO "Error creating proc entry");
+			return -ENOMEM;
+		}
+	}
+	return 0;
+}
+
+
+static int __init nat46_init(void)
+{
+	int ret = 0;
+
+	printk("nat46: module (version %s) loaded.\n", NAT46_VERSION);
+	ret = create_nat46_proc_entry();
+	if(ret) {
+		goto error;
+	}
+	return 0;
+
+error:
+	return ret;
+}
+
+static void __exit nat46_exit(void)
+{
+	nat46_destroy_all();
+	if (nat46_proc_parent) {
+		if (nat46_proc_entry) {
+			remove_proc_entry(NAT46_CONTROL_PROC_NAME, nat46_proc_parent);
+		}
+		remove_proc_entry(NAT46_PROC_NAME, init_net.proc_net);
+	}
+	printk("nat46: module unloaded.\n");
+}
+
+module_init(nat46_init);
+module_exit(nat46_exit);
+
+
diff --git a/nat46/nat46/modules/nat46-module.h b/nat46/nat46/modules/nat46-module.h
new file mode 100644
index 0000000..ffb0a76
--- /dev/null
+++ b/nat46/nat46/modules/nat46-module.h
@@ -0,0 +1,17 @@
+/*
+ *
+ * Copyright (c) 2013-2014 Andrew Yourtchenko <ayourtch@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+extern int debug;
+extern bool add_dummy_header;
diff --git a/nat46/nat46/modules/nat46-netdev.c b/nat46/nat46/modules/nat46-netdev.c
new file mode 100644
index 0000000..b845020
--- /dev/null
+++ b/nat46/nat46/modules/nat46-netdev.c
@@ -0,0 +1,402 @@
+/*
+ * Network device related boilerplate functions
+ *
+ * Copyright (c) 2013-2014 Andrew Yourtchenko <ayourtch@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+
+
+#include <linux/if_arp.h>
+#include <linux/netdevice.h>
+#include <linux/route.h>
+#include <linux/skbuff.h>
+#include <net/ip6_fib.h>
+#include <net/ip6_route.h>
+#include <net/ipv6.h>
+#include <linux/version.h>
+#include <net/ip_tunnels.h>
+#include <linux/radix-tree.h>
+#include "nat46-core.h"
+#include "nat46-module.h"
+
+#define NETDEV_DEFAULT_NAME "nat46."
+static RADIX_TREE(netdev_tree, GFP_ATOMIC);
+
+typedef struct {
+  u32 sig;
+  nat46_instance_t *nat46;
+} nat46_netdev_priv_t;
+
+static u8 netdev_count = 0;
+
+static int nat46_netdev_up(struct net_device *dev);
+static int nat46_netdev_down(struct net_device *dev);
+static int nat46_netdev_init(struct net_device *dev);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,4,0)
+static void nat46_get_stats64(struct net_device *dev, struct rtnl_link_stats64 *tot);
+#else
+static struct rtnl_link_stats64 *nat46_get_stats64(struct net_device *dev,
+                                                   struct rtnl_link_stats64 *tot);
+#endif
+static netdev_tx_t nat46_netdev_xmit(struct sk_buff *skb, struct net_device *dev);
+
+
+static const struct net_device_ops nat46_netdev_ops = {
+	.ndo_init       = nat46_netdev_init,	/* device specific initialization */
+	.ndo_open       = nat46_netdev_up,      /* Called at ifconfig nat46 up */
+	.ndo_stop       = nat46_netdev_down,    /* Called at ifconfig nat46 down */
+	.ndo_start_xmit = nat46_netdev_xmit,    /* REQUIRED, must return NETDEV_TX_OK */
+	.ndo_get_stats64 = nat46_get_stats64,	/* 64 bit device stats */
+};
+
+static int nat46_netdev_init(struct net_device *dev)
+{
+	int i;
+	dev->tstats = alloc_percpu(struct pcpu_sw_netstats);
+	if (!dev->tstats) {
+		return -ENOMEM;
+	}
+
+	for_each_possible_cpu(i) {
+		struct pcpu_sw_netstats *ipt_stats;
+		ipt_stats = per_cpu_ptr(dev->tstats, i);
+		u64_stats_init(&ipt_stats->syncp);
+	}
+	return 0;
+}
+
+static void nat46_netdev_resource_free(struct net_device *dev)
+{
+	free_percpu(dev->tstats);
+}
+
+static int nat46_netdev_up(struct net_device *dev)
+{
+	netif_start_queue(dev);
+	return 0;
+}
+
+static int nat46_netdev_down(struct net_device *dev)
+{
+	netif_stop_queue(dev);
+	return 0;
+}
+
+static netdev_tx_t nat46_netdev_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct pcpu_sw_netstats *tstats = get_cpu_ptr(dev->tstats);
+
+	u64_stats_update_begin(&tstats->syncp);
+	tstats->rx_packets++;
+	tstats->rx_bytes += skb->len;
+	u64_stats_update_end(&tstats->syncp);
+	put_cpu_ptr(tstats);
+	if(ETH_P_IP == ntohs(skb->protocol)) {
+		nat46_ipv4_input(skb);
+	}
+	if(ETH_P_IPV6 == ntohs(skb->protocol)) {
+		nat46_ipv6_input(skb);
+	}
+	kfree_skb(skb);
+	return NETDEV_TX_OK;
+}
+
+void nat46_netdev_count_xmit(struct sk_buff *skb, struct net_device *dev) {
+	struct pcpu_sw_netstats *tstats = get_cpu_ptr(dev->tstats);
+
+	u64_stats_update_begin(&tstats->syncp);
+	tstats->tx_packets++;
+	tstats->tx_bytes += skb->len;
+	u64_stats_update_end(&tstats->syncp);
+	put_cpu_ptr(tstats);
+}
+
+void nat46_update_stats(struct net_device *dev, uint32_t rx_packets, uint32_t rx_bytes,
+			uint32_t tx_packets, uint32_t tx_bytes, uint32_t rx_dropped, uint32_t tx_dropped)
+{
+	struct pcpu_sw_netstats *tstats = get_cpu_ptr(dev->tstats);
+
+	u64_stats_update_begin(&tstats->syncp);
+	tstats->rx_packets += rx_packets;
+	tstats->rx_bytes += rx_bytes;
+	tstats->tx_packets += tx_packets;
+	tstats->tx_bytes += tx_bytes;
+	dev->stats.rx_dropped += rx_dropped;
+	dev->stats.tx_dropped += tx_dropped;
+	u64_stats_update_end(&tstats->syncp);
+	put_cpu_ptr(tstats);
+}
+EXPORT_SYMBOL(nat46_update_stats);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,4,0)
+static void nat46_get_stats64(struct net_device *dev,
+#else
+static struct rtnl_link_stats64 *nat46_get_stats64(struct net_device *dev,
+#endif
+		struct rtnl_link_stats64 *tot)
+{
+	ip_tunnel_get_stats64(dev, tot);
+}
+
+void *netdev_nat46_instance(struct net_device *dev) {
+	nat46_netdev_priv_t *priv = netdev_priv(dev);
+	return priv->nat46;
+}
+
+static void netdev_nat46_set_instance(struct net_device *dev, nat46_instance_t *new_nat46) {
+	nat46_netdev_priv_t *priv = netdev_priv(dev);
+	if(priv->nat46) {
+		release_nat46_instance(priv->nat46);
+	}
+	priv->nat46 = new_nat46;
+}
+
+static void nat46_netdev_setup(struct net_device *dev)
+{
+	nat46_netdev_priv_t *priv = netdev_priv(dev);
+	nat46_instance_t *nat46 = alloc_nat46_instance(1, NULL, -1, -1);
+
+	memset(priv, 0, sizeof(*priv));
+	priv->sig = NAT46_DEVICE_SIGNATURE;
+	priv->nat46 = nat46;
+
+	dev->netdev_ops = &nat46_netdev_ops;
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,4,0)
+	dev->priv_destructor = nat46_netdev_resource_free;
+#else
+	dev->destructor = nat46_netdev_resource_free;
+#endif
+	dev->type = ARPHRD_NONE;
+	dev->hard_header_len = 0;
+	dev->addr_len = 0;
+	dev->mtu = 16384; /* iptables does reassembly. Rather than using ETH_DATA_LEN, let's try to get as much mileage as we can with the Linux stack */
+	dev->features = NETIF_F_NETNS_LOCAL;
+	dev->flags = IFF_NOARP | IFF_POINTOPOINT;
+}
+
+int nat46_netdev_create(char *basename, struct net_device **dev)
+{
+	int ret = 0;
+	char *devname = NULL;
+	int automatic_name = 0;
+
+	if (basename && strcmp("", basename)) {
+		devname = kmalloc(strlen(basename)+1, GFP_KERNEL);
+	} else {
+		devname = kmalloc(strlen(NETDEV_DEFAULT_NAME)+3+1, GFP_KERNEL);
+		automatic_name = 1;
+	}
+	if (!devname) {
+		printk("nat46: can not allocate memory to store device name.\n");
+		ret = -ENOMEM;
+		goto err;
+	}
+	if (automatic_name) {
+		snprintf(devname, strlen(NETDEV_DEFAULT_NAME)+3+1, "%s%d", NETDEV_DEFAULT_NAME, netdev_count);
+		netdev_count++;
+	} else {
+		strcpy(devname, basename);
+	}
+
+#if LINUX_VERSION_CODE <= KERNEL_VERSION(3,17,0)
+	*dev = alloc_netdev(sizeof(nat46_instance_t), devname, nat46_netdev_setup);
+#else
+	*dev = alloc_netdev(sizeof(nat46_instance_t), devname, NET_NAME_UNKNOWN, nat46_netdev_setup);
+#endif
+	if (!*dev) {
+		printk("nat46: Unable to allocate nat46 device '%s'.\n", devname);
+		ret = -ENOMEM;
+		goto err_alloc_dev;
+	}
+
+	ret = register_netdev(*dev);
+	if(ret) {
+		printk("nat46: Unable to register nat46 device.\n");
+		ret = -ENOMEM;
+		goto err_register_dev;
+	}
+
+	printk("nat46: netdevice nat46 '%s' created successfully.\n", devname);
+	kfree(devname);
+
+	/*
+	 * add this netdevice to list
+	 */
+	radix_tree_insert(&netdev_tree, (*dev)->ifindex, (void *)*dev);
+
+	return 0;
+
+err_register_dev:
+	free_netdev(*dev);
+err_alloc_dev:
+	kfree(devname);
+err:
+	return ret;
+}
+
+void nat46_netdev_destroy(struct net_device *dev)
+{
+	netdev_nat46_set_instance(dev, NULL);
+	unregister_netdev(dev);
+	radix_tree_delete(&netdev_tree, dev->ifindex);
+	printk("nat46: Destroying nat46 device.\n");
+}
+
+bool is_map_t_dev(struct net_device *dev)
+{
+	if(!dev) {
+		return false;
+	}
+
+	if(radix_tree_lookup(&netdev_tree, dev->ifindex)) {
+		return true;
+	}
+	return false;
+}
+EXPORT_SYMBOL(is_map_t_dev);
+
+static int is_nat46(struct net_device *dev) {
+	nat46_netdev_priv_t *priv = netdev_priv(dev);
+	return (priv && (NAT46_DEVICE_SIGNATURE == priv->sig));
+}
+
+
+static struct net_device *find_dev(char *name) {
+	struct net_device *dev;
+	struct net_device *out = NULL;
+
+	if(!name) {
+		return NULL;
+	}
+
+	read_lock(&dev_base_lock);
+	dev = first_net_device(&init_net);
+	while (dev) {
+		if((0 == strcmp(dev->name, name)) && is_nat46(dev)) {
+			if(debug) {
+				printk(KERN_INFO "found [%s]\n", dev->name);
+			}
+			out = dev;
+			break;
+		}
+		dev = next_net_device(dev);
+	}
+	read_unlock(&dev_base_lock);
+	return out;
+}
+
+int nat46_create(char *devname) {
+	int ret = 0;
+	struct net_device *dev = find_dev(devname);
+	if (dev) {
+		printk("Can not add: device '%s' already exists!\n", devname);
+		return -1;
+	}
+	ret = nat46_netdev_create(devname, &dev);
+	return ret;
+}
+
+int nat46_destroy(char *devname) {
+	struct net_device *dev = find_dev(devname);
+	if(dev) {
+		printk("Destroying '%s'\n", devname);
+		nat46_netdev_destroy(dev);
+		return 0;
+	} else {
+		printk("Could not find device '%s'\n", devname);
+		return -1;
+	}
+}
+
+int nat46_insert(char *devname, char *buf) {
+	struct net_device *dev = find_dev(devname);
+	int ret = -1;
+	if(dev) {
+		nat46_instance_t *nat46 = netdev_nat46_instance(dev);
+		nat46_instance_t *nat46_new;
+		if(nat46->npairs == NUM_RULE_PAIRS_MAX) {
+			printk("Could not insert a new rule on device %s\n", devname);
+			return ret;
+		}
+
+		nat46_new = alloc_nat46_instance(nat46->npairs+1, nat46, 0, 1);
+		if(nat46_new) {
+			netdev_nat46_set_instance(dev, nat46_new);
+			ret = nat46_set_ipair_config(nat46_new, 0, buf, strlen(buf));
+		} else {
+			printk("Could not insert a new rule on device %s\n", devname);
+		}
+	}
+	return ret;
+}
+
+int nat46_configure(char *devname, char *buf) {
+	struct net_device *dev = find_dev(devname);
+	if(dev) {
+		nat46_instance_t *nat46 = netdev_nat46_instance(dev);
+		return nat46_set_config(nat46, buf, strlen(buf));
+	} else {
+		return -1;
+	}
+}
+
+void nat64_show_all_configs(struct seq_file *m) {
+	struct net_device *dev;
+	read_lock(&dev_base_lock);
+	dev = first_net_device(&init_net);
+	while (dev) {
+		if(is_nat46(dev)) {
+			nat46_instance_t *nat46 = netdev_nat46_instance(dev);
+			int buflen = 1024;
+			int ipair = -1;
+			char *buf = kmalloc(buflen+1, GFP_KERNEL);
+			seq_printf(m, "add %s\n", dev->name);
+			if(buf) {
+				for(ipair = 0; ipair < nat46->npairs; ipair++) {
+					nat46_get_ipair_config(nat46, ipair, buf, buflen);
+					if(ipair < nat46->npairs-1) {
+						seq_printf(m,"insert %s %s\n", dev->name, buf);
+					} else {
+						seq_printf(m,"config %s %s\n", dev->name, buf);
+					}
+				}
+				seq_printf(m,"\n");
+				kfree(buf);
+			}
+		}
+		dev = next_net_device(dev);
+	}
+	read_unlock(&dev_base_lock);
+
+}
+
+void nat46_destroy_all(void) {
+	struct net_device *dev;
+	struct net_device *nat46dev;
+	do {
+		read_lock(&dev_base_lock);
+		nat46dev = NULL;
+		dev = first_net_device(&init_net);
+		while (dev) {
+			if(is_nat46(dev)) {
+				nat46dev = dev;
+			}
+			dev = next_net_device(dev);
+		}
+		read_unlock(&dev_base_lock);
+		if(nat46dev) {
+			nat46_netdev_destroy(nat46dev);
+		}
+	} while (nat46dev);
+
+}
diff --git a/nat46/nat46/modules/nat46-netdev.h b/nat46/nat46/modules/nat46-netdev.h
new file mode 100644
index 0000000..a9e8596
--- /dev/null
+++ b/nat46/nat46/modules/nat46-netdev.h
@@ -0,0 +1,29 @@
+/*
+ *
+ * Copyright (c) 2013-2014 Andrew Yourtchenko <ayourtch@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define NAT46_DEVICE_SIGNATURE 0x544e36dd
+
+int nat46_create(char *devname);
+int nat46_destroy(char *devname);
+int nat46_insert(char *devname, char *buf);
+int nat46_configure(char *devname, char *buf);
+void nat46_destroy_all(void);
+void nat64_show_all_configs(struct seq_file *m);
+void nat46_netdev_count_xmit(struct sk_buff *skb, struct net_device *dev);
+void *netdev_nat46_instance(struct net_device *dev);
+
+void nat46_update_stats(struct net_device *dev, uint32_t rx_packets, uint32_t rx_bytes, uint32_t tx_packets, uint32_t tx_bytes,
+							uint32_t rx_dropped, uint32_t tx_dropped);
+bool is_map_t_dev(struct net_device *dev);
diff --git a/qca-nss-clients/.gitignore b/qca-nss-clients/.gitignore
new file mode 100644
index 0000000..cb831ad
--- /dev/null
+++ b/qca-nss-clients/.gitignore
@@ -0,0 +1,14 @@
+
+# Ignore general build intermediate files
+*.o
+*.cmd
+*.ko
+*.mod.*
+*.mod
+Module.symvers
+modules.order
+/.tmp_versions/
+built-in.a
+
+# don't ignore .gitignore itself
+!.gitignore
diff --git a/qca-nss-clients/Makefile b/qca-nss-clients/Makefile
new file mode 100644
index 0000000..b0fb42d
--- /dev/null
+++ b/qca-nss-clients/Makefile
@@ -0,0 +1,47 @@
+# Makefile for the clients using the NSS driver
+
+ccflags-y := -I$(obj) -I$(obj)/..
+
+export BUILD_ID = "Build ID $(shell git rev-parse HEAD | head -c 8)"
+ccflags-y += -DNSS_CLIENT_BUILD_ID=$(BUILD_ID)
+
+qca-nss-tun6rd-objs := nss_connmgr_tun6rd.o
+
+ccflags-y += -DNSS_TUN6RD_DEBUG_LEVEL=0
+ccflags-y += -Werror
+
+KERNELVERSION := $(word 1, $(subst ., ,$(KERNELVERSION))).$(word 2, $(subst ., ,$(KERNELVERSION)))
+
+obj-$(bridge-mgr)+= bridge/
+obj-$(capwapmgr)+= capwapmgr/
+obj-$(dtlsmgr)+= dtls/$(DTLSMGR_DIR)/
+obj-$(gre)+= gre/
+obj-$(ipsecmgr)+= ipsecmgr/$(IPSECMGR_DIR)/
+obj-$(ipsecmgr-klips)+= ipsecmgr/$(IPSECMGR_DIR)/plugins/klips/
+obj-$(ipsecmgr-xfrm)+= ipsecmgr/$(IPSECMGR_DIR)/plugins/xfrm/
+obj-$(l2tpv2)+= l2tp/l2tpv2/
+obj-$(lag-mgr)+= lag/
+obj-$(map-t)+= map/map-t/
+obj-$(portifmgr)+= portifmgr/
+obj-$(pptp)+= pptp/
+obj-$(profile)+= profiler/
+obj-$(tunipip6)+= tunipip6/
+obj-$(tun6rd)+= qca-nss-tun6rd.o
+obj-$(qdisc)+= nss_qdisc/
+obj-$(vlan-mgr)+= vlan/
+obj-$(vxlanmgr)+= vxlanmgr/
+obj-$(pvxlanmgr)+= pvxlanmgr/
+obj-$(pppoe)+= pppoe/
+obj-$(ovpn-mgr)+= openvpn/
+obj-$(eogremgr)+= eogremgr/
+obj-$(clmapmgr)+= clmapmgr/
+obj-$(match)+= match/
+obj-$(tlsmgr)+= tls/
+obj-$(mirror)+= mirror/
+obj-$(mscs)+= mscs/
+obj-$(wifi-meshmgr)+= wifi_meshmgr/
+
+#NSS NETLINK
+obj-$(netlink)+= netlink/
+
+obj ?= .
diff --git a/qca-nss-clients/Makefile.fsm b/qca-nss-clients/Makefile.fsm
new file mode 100644
index 0000000..0ff2994
--- /dev/null
+++ b/qca-nss-clients/Makefile.fsm
@@ -0,0 +1,19 @@
+# Makefile for the clients using the NSS driver
+
+ccflags-y := -I$(obj) -I$(obj)/..
+export SoC = fsm9010
+
+export BUILD_ID = \"Build Id: $(shell date +'%m/%d/%y, %H:%M:%S')\"
+ccflags-y += -DNSS_CLIENT_BUILD_ID="$(BUILD_ID)"
+
+# Profiler
+obj-y+= profiler/
+
+#IPsec manager
+obj-y+= ipsecmgr/
+
+#NSS NETLINK
+obj-y+= netlink/
+
+obj ?= .
+
diff --git a/qca-nss-clients/bridge/Makefile b/qca-nss-clients/bridge/Makefile
new file mode 100644
index 0000000..80cec23
--- /dev/null
+++ b/qca-nss-clients/bridge/Makefile
@@ -0,0 +1,20 @@
+ccflags-y := -I$(obj)/../exports -I$(obj)/..  -I$(obj)/nss_hal/include
+ccflags-y += -DNSS_CLIENT_BUILD_ID="$(BUILD_ID)"
+ccflags-$(NSS_BRIDGE_MGR_OVS_ENABLE) += -DNSS_BRIDGE_MGR_OVS_ENABLE
+
+obj-m += qca-nss-bridge-mgr.o
+qca-nss-bridge-mgr-objs := nss_bridge_mgr.o
+ifeq ($(NSS_BRIDGE_MGR_OVS_ENABLE), y)
+qca-nss-bridge-mgr-objs += nss_bridge_mgr_ovs.o
+endif
+
+ccflags-y += -DNSS_BRIDGE_MGR_DEBUG_LEVEL=0
+ccflags-y += -Wall -Werror
+
+ifeq ($(SoC),$(filter $(SoC),ipq807x ipq807x_64 ipq60xx ipq60xx_64))
+ccflags-y += -DNSS_BRIDGE_MGR_PPE_SUPPORT
+endif
+
+ifneq (,$(filter $(CONFIG_BONDING),y m))
+ccflags-y += -DBONDING_SUPPORT
+endif
diff --git a/qca-nss-clients/bridge/nss_bridge_mgr.c b/qca-nss-clients/bridge/nss_bridge_mgr.c
new file mode 100644
index 0000000..57f1e2d
--- /dev/null
+++ b/qca-nss-clients/bridge/nss_bridge_mgr.c
@@ -0,0 +1,1644 @@
+/*
+ **************************************************************************
+ * Copyright (c) 2016-2021, The Linux Foundation. All rights reserved.
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all copies.
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ **************************************************************************
+ */
+
+/*
+ * nss_bridge_mgr.c
+ *	NSS to HLOS Bridge Interface manager
+ */
+#include <linux/sysctl.h>
+#include <linux/etherdevice.h>
+#include <linux/if_vlan.h>
+#include <linux/of.h>
+#include <linux/if_bridge.h>
+#include <net/bonding.h>
+#if defined(NSS_BRIDGE_MGR_PPE_SUPPORT)
+#include <ref/ref_vsi.h>
+#include <nss_vlan_mgr.h>
+#include <fal/fal_fdb.h>
+#include <fal/fal_stp.h>
+#include <fal/fal_acl.h>
+#include <fal/fal_api.h>
+#include <fal/fal_port_ctrl.h>
+#endif
+#include <nss_api_if.h>
+
+#if defined(NSS_BRIDGE_MGR_OVS_ENABLE)
+#include <ovsmgr.h>
+#endif
+
+#include "nss_bridge_mgr_priv.h"
+
+/*
+ * Module parameter to enable/disable OVS bridge.
+ */
+static bool ovs_enabled = false;
+
+static struct nss_bridge_mgr_context br_mgr_ctx;
+
+/*
+ * nss_bridge_mgr_create_instance()
+ *	Create a bridge instance.
+ */
+static struct nss_bridge_pvt *nss_bridge_mgr_create_instance(struct net_device *dev)
+{
+	struct nss_bridge_pvt *br;
+
+#if !defined(NSS_BRIDGE_MGR_OVS_ENABLE)
+	if (!netif_is_bridge_master(dev))
+		return NULL;
+#else
+	/*
+	 * When OVS is enabled, we have to check for both bridge master
+	 * and OVS master.
+	 */
+	if (!netif_is_bridge_master(dev) && !ovsmgr_is_ovs_master(dev))
+		return NULL;
+#endif
+
+	br = kzalloc(sizeof(*br), GFP_KERNEL);
+	if (!br)
+		return NULL;
+
+	INIT_LIST_HEAD(&br->list);
+
+	return br;
+}
+
+/*
+ * nss_bridge_mgr_delete_instance()
+ *	Delete a bridge instance from bridge list and free the bridge instance.
+ */
+static void nss_bridge_mgr_delete_instance(struct nss_bridge_pvt *br)
+{
+	spin_lock(&br_mgr_ctx.lock);
+	if (!list_empty(&br->list))
+		list_del(&br->list);
+
+	spin_unlock(&br_mgr_ctx.lock);
+
+	kfree(br);
+}
+
+/*
+ * nss_bridge_mgr_find_instance()
+ *	Find a bridge instance from bridge list.
+ */
+struct nss_bridge_pvt *nss_bridge_mgr_find_instance(struct net_device *dev)
+{
+	struct nss_bridge_pvt *br;
+
+#if !defined(NSS_BRIDGE_MGR_OVS_ENABLE)
+	if (!netif_is_bridge_master(dev))
+		return NULL;
+#else
+	/*
+	 * When OVS is enabled, we have to check for both bridge master
+	 * and OVS master.
+	 */
+	if (!netif_is_bridge_master(dev) && !ovsmgr_is_ovs_master(dev))
+		return NULL;
+#endif
+	/*
+	 * Do we have it on record?
+	 */
+	spin_lock(&br_mgr_ctx.lock);
+	list_for_each_entry(br, &br_mgr_ctx.list, list) {
+		if (br->dev == dev) {
+			spin_unlock(&br_mgr_ctx.lock);
+			return br;
+		}
+	}
+
+	spin_unlock(&br_mgr_ctx.lock);
+	return NULL;
+}
+
+#if defined(NSS_BRIDGE_MGR_PPE_SUPPORT)
+/*
+ * nss_bridge_mgr_enable_fdb_learning()
+ *	Enable fdb learning in PPE.
+ */
+static int nss_bridge_mgr_enable_fdb_learning(struct nss_bridge_pvt *br)
+{
+	fal_vsi_newaddr_lrn_t newaddr_lrn;
+	fal_vsi_stamove_t sta_move;
+
+	/*
+	 * Enable station move
+	 */
+	sta_move.stamove_en = 1;
+	sta_move.action = FAL_MAC_FRWRD;
+	if (fal_vsi_stamove_set(NSS_BRIDGE_MGR_SWITCH_ID, br->vsi, &sta_move)) {
+		nss_bridge_mgr_warn("%px: Failed to enable station move for Bridge vsi\n", br);
+		return -1;
+	}
+
+	/*
+	 * Enable FDB learning in PPE
+	 */
+	newaddr_lrn.lrn_en = 1;
+	newaddr_lrn.action = FAL_MAC_FRWRD;
+	if (fal_vsi_newaddr_lrn_set(NSS_BRIDGE_MGR_SWITCH_ID, br->vsi, &newaddr_lrn)) {
+		nss_bridge_mgr_warn("%px: Failed to enable FDB learning for Bridge vsi\n", br);
+		goto disable_sta_move;
+	}
+
+	/*
+	 * Send a notification to NSS for FDB learning enable.
+	 */
+	if (nss_bridge_tx_set_fdb_learn_msg(br->ifnum, NSS_BRIDGE_FDB_LEARN_ENABLE) != NSS_TX_SUCCESS) {
+		nss_bridge_mgr_warn("%px: Tx message failed for FDB learning status\n", br);
+		goto disable_fdb_learning;
+	}
+
+	return 0;
+
+disable_fdb_learning:
+	newaddr_lrn.lrn_en = 0;
+	newaddr_lrn.action = FAL_MAC_FRWRD;
+	if (fal_vsi_newaddr_lrn_set(NSS_BRIDGE_MGR_SWITCH_ID, br->vsi, &newaddr_lrn))
+		nss_bridge_mgr_warn("%px: Failed to disable FDB learning for Bridge vsi\n", br);
+
+disable_sta_move:
+	sta_move.stamove_en = 0;
+	sta_move.action = FAL_MAC_FRWRD;
+	if (fal_vsi_stamove_set(NSS_BRIDGE_MGR_SWITCH_ID, br->vsi, &sta_move))
+		nss_bridge_mgr_warn("%px: Failed to disable station move for Bridge vsi\n", br);
+
+	return -1;
+}
+
+/*
+ * nss_bridge_mgr_disable_fdb_learning()
+ *	Disable fdb learning in PPE
+ *
+ * When the first bond interface joins the bridge, we need to use flow-based
+ * rules; FDB learning/station move must be disabled.
+ */
+static int nss_bridge_mgr_disable_fdb_learning(struct nss_bridge_pvt *br)
+{
+	fal_vsi_newaddr_lrn_t newaddr_lrn;
+	fal_vsi_stamove_t sta_move;
+
+	/*
+	 * Disable station move
+	 */
+	sta_move.stamove_en = 0;
+	sta_move.action = FAL_MAC_FRWRD;
+	if (fal_vsi_stamove_set(NSS_BRIDGE_MGR_SWITCH_ID, br->vsi, &sta_move)) {
+		nss_bridge_mgr_warn("%px: Failed to disable station move for Bridge vsi\n", br);
+		return -1;
+	}
+
+	/*
+	 * Disable FDB learning in PPE
+	 */
+	newaddr_lrn.lrn_en = 0;
+	newaddr_lrn.action = FAL_MAC_FRWRD;
+	if (fal_vsi_newaddr_lrn_set(NSS_BRIDGE_MGR_SWITCH_ID, br->vsi, &newaddr_lrn)) {
+		nss_bridge_mgr_warn("%px: Failed to disable FDB learning for Bridge vsi\n", br);
+		goto enable_sta_move;
+	}
+
+	/*
+	 * Flush FDB table for the bridge vsi
+	 */
+	if (fal_fdb_entry_del_byfid(NSS_BRIDGE_MGR_SWITCH_ID, br->vsi, FAL_FDB_DEL_STATIC)) {
+		nss_bridge_mgr_warn("%px: Failed to flush FDB table for vsi:%d in PPE\n", br, br->vsi);
+		goto enable_fdb_learning;
+	}
+
+	/*
+	 * Send a notification to NSS for FDB learning disable.
+	 */
+	if (nss_bridge_tx_set_fdb_learn_msg(br->ifnum, NSS_BRIDGE_FDB_LEARN_DISABLE) != NSS_TX_SUCCESS) {
+		nss_bridge_mgr_warn("%px: Tx message failed for FDB learning status\n", br);
+		goto enable_fdb_learning;
+	}
+
+	return 0;
+
+enable_fdb_learning:
+	newaddr_lrn.lrn_en = 1;
+	newaddr_lrn.action = FAL_MAC_FRWRD;
+	if (fal_vsi_newaddr_lrn_set(NSS_BRIDGE_MGR_SWITCH_ID, br->vsi, &newaddr_lrn))
+		nss_bridge_mgr_warn("%px: Failed to enable FDB learning for Bridge vsi\n", br);
+
+enable_sta_move:
+	sta_move.stamove_en = 1;
+	sta_move.action = FAL_MAC_FRWRD;
+	if (fal_vsi_stamove_set(NSS_BRIDGE_MGR_SWITCH_ID, br->vsi, &sta_move))
+		nss_bridge_mgr_warn("%px: Failed to enable station move for Bridge vsi\n", br);
+
+	return -1;
+}
+
+/*
+ * nss_bridge_mgr_add_bond_slave()
+ *	A slave interface being added to a bond master that belongs to a bridge.
+ */
+static int nss_bridge_mgr_add_bond_slave(struct net_device *bond_master,
+		struct net_device *slave, struct nss_bridge_pvt *b_pvt)
+{
+	uint32_t *port_vsi;
+	int32_t port_id;
+	int32_t ifnum;
+	int32_t lagid = 0;
+	int32_t bondid = -1;
+
+	/*
+	 * Figure out the aggregation id of this slave
+	 */
+#if defined(BONDING_SUPPORT)
+	bondid = bond_get_id(bond_master);
+#endif
+	if (bondid < 0) {
+		nss_bridge_mgr_warn("%px: Invalid LAG group id 0x%x\n",
+				b_pvt, bondid);
+		return -1;
+	}
+
+	lagid = bondid + NSS_LAG0_INTERFACE_NUM;
+
+	nss_bridge_mgr_trace("%px: Bond Slave %s is added bridge\n",
+			b_pvt, slave->name);
+
+	ifnum = nss_cmn_get_interface_number_by_dev(slave);
+
+	/*
+	 * Hardware supports only PHYSICAL Ports as trunk ports
+	 */
+	if (!NSS_BRIDGE_MGR_IF_IS_TYPE_PHYSICAL(ifnum)) {
+		nss_bridge_mgr_warn("%px: Interface %s is not Physical Interface\n",
+				b_pvt, slave->name);
+		return -1;
+	}
+
+	nss_bridge_mgr_trace("%px: Interface %s adding into bridge\n",
+			b_pvt, slave->name);
+	port_id = ifnum;
+
+	/*
+	 * Take bridge lock as we are updating vsi and port forwarding
+	 * details in PPE Hardware
+	 */
+	spin_lock(&br_mgr_ctx.lock);
+	port_vsi = &b_pvt->port_vsi[port_id - 1];
+
+	if (ppe_port_vsi_get(NSS_BRIDGE_MGR_SWITCH_ID, port_id, port_vsi)) {
+		spin_unlock(&br_mgr_ctx.lock);
+		nss_bridge_mgr_warn("%px: Couldn't get VSI for port %d\n",
+				b_pvt, port_id);
+		return -1;
+	}
+
+	if (ppe_port_vsi_set(NSS_BRIDGE_MGR_SWITCH_ID, port_id, b_pvt->vsi)) {
+		spin_unlock(&br_mgr_ctx.lock);
+		nss_bridge_mgr_warn("%px: Couldn't set bridge VSI for port %d\n",
+				b_pvt, port_id);
+		return -1;
+	}
+	spin_unlock(&br_mgr_ctx.lock);
+
+	if (nss_bridge_tx_join_msg(b_pvt->ifnum,
+				slave) != NSS_TX_SUCCESS) {
+		if (ppe_port_vsi_set(NSS_BRIDGE_MGR_SWITCH_ID, port_id, *port_vsi))
+			nss_bridge_mgr_warn("%px: Couldn't set bridge VSI for port %d\n", b_pvt, port_id);
+		nss_bridge_mgr_warn("%px: Couldn't add port %d in bridge",
+				b_pvt, port_id);
+		return -1;
+	}
+
+	spin_lock(&br_mgr_ctx.lock);
+	b_pvt->lag_ports[port_id - 1] = lagid;
+	spin_unlock(&br_mgr_ctx.lock);
+
+	return 0;
+}
+
+/*
+ * nss_bridge_mgr_del_bond_slave()
+ *	A slave interface being removed from a bond master that belongs to a bridge.
+ */
+static int nss_bridge_mgr_del_bond_slave(struct net_device *bond_master,
+		struct net_device *slave, struct nss_bridge_pvt *b_pvt)
+{
+	uint32_t *port_vsi;
+	int32_t port_id;
+	int32_t ifnum;
+	int32_t lagid = 0;
+	int32_t bondid = -1;
+
+	/*
+	 * Figure out the aggregation id of this slave
+	 */
+#if defined(BONDING_SUPPORT)
+	bondid = bond_get_id(bond_master);
+#endif
+	if (bondid < 0) {
+		nss_bridge_mgr_warn("%px: Invalid LAG group id 0x%x\n",
+				b_pvt, bondid);
+		return -1;
+	}
+
+	lagid = bondid + NSS_LAG0_INTERFACE_NUM;
+
+	nss_bridge_mgr_trace("%px: Bond Slave %s leaving bridge\n",
+			b_pvt, slave->name);
+
+	ifnum = nss_cmn_get_interface_number_by_dev(slave);
+
+	/*
+	 * Hardware supports only PHYSICAL Ports as trunk ports
+	 */
+	if (!NSS_BRIDGE_MGR_IF_IS_TYPE_PHYSICAL(ifnum)) {
+		nss_bridge_mgr_warn("%px: Interface %s is not Physical Interface\n",
+				b_pvt, slave->name);
+		return -1;
+	}
+
+	nss_bridge_mgr_trace("%px: Interface %s leaving from bridge\n",
+			b_pvt, slave->name);
+
+	port_id = (fal_port_t)ifnum;
+
+	/*
+	 * Take bridge lock as we are updating vsi and port forwarding
+	 * details in PPE Hardware
+	 */
+	spin_lock(&br_mgr_ctx.lock);
+	port_vsi = &b_pvt->port_vsi[port_id - 1];
+
+	if (b_pvt->lag_ports[port_id - 1] != lagid) {
+		spin_unlock(&br_mgr_ctx.lock);
+		return -1;
+	}
+
+	if (ppe_port_vsi_set(NSS_BRIDGE_MGR_SWITCH_ID, port_id, *port_vsi)) {
+		spin_unlock(&br_mgr_ctx.lock);
+		nss_bridge_mgr_warn("%px: failed to restore port VSI for port %d\n", b_pvt, port_id);
+		return -1;
+	}
+	spin_unlock(&br_mgr_ctx.lock);
+
+	if (nss_bridge_tx_leave_msg(b_pvt->ifnum,
+				slave) != NSS_TX_SUCCESS) {
+		ppe_port_vsi_set(NSS_BRIDGE_MGR_SWITCH_ID, port_id, b_pvt->vsi);
+		nss_bridge_mgr_trace("%px: Failed to remove port %d from bridge\n",
+				b_pvt, port_id);
+		return -1;
+	}
+
+	spin_lock(&br_mgr_ctx.lock);
+	b_pvt->lag_ports[port_id - 1] = 0;
+	spin_unlock(&br_mgr_ctx.lock);
+
+	/*
+	 * Set STP state to forwarding after bond physical port leaves bridge
+	 */
+	fal_stp_port_state_set(NSS_BRIDGE_MGR_SWITCH_ID, NSS_BRIDGE_MGR_SPANNING_TREE_ID,
+					port_id, FAL_STP_FORWARDING);
+	return 0;
+}
+
+/*
+ * nss_bridge_mgr_bond_master_join()
+ *	Add a bond interface to bridge
+ */
+static int nss_bridge_mgr_bond_master_join(struct net_device *bond_master,
+		struct nss_bridge_pvt *b_pvt)
+{
+	struct net_device *slave;
+
+	/*
+	 * bond enslave/release path is protected by rtnl lock
+	 */
+	ASSERT_RTNL();
+
+	/*
+	 * Wait for RCU QS
+	 */
+	synchronize_rcu();
+
+	/*
+	 * Join each of the bonded slaves to the VSI group
+	 */
+	for_each_netdev(&init_net, slave) {
+		if (netdev_master_upper_dev_get(slave) != bond_master) {
+			continue;
+		}
+
+		if (nss_bridge_mgr_add_bond_slave(bond_master, slave, b_pvt)) {
+			nss_bridge_mgr_warn("%px: Failed to add slave (%s) state in Bridge\n", b_pvt, slave->name);
+			goto cleanup;
+		}
+	}
+
+	/*
+	 * If other bond devices are already attached to the bridge,
+	 * only increment bond_slave_num.
+	 */
+	spin_lock(&br_mgr_ctx.lock);
+	if (b_pvt->bond_slave_num) {
+		b_pvt->bond_slave_num++;
+		spin_unlock(&br_mgr_ctx.lock);
+		return NOTIFY_DONE;
+	}
+	spin_unlock(&br_mgr_ctx.lock);
+
+	/*
+	 * This is the first bond device being attached to bridge. In order to enforce Linux
+	 * bond slave selection in bridge flows involving bond interfaces, we need to disable
+	 * fdb learning on this bridge master to allow flow based bridging.
+	 */
+	if (!nss_bridge_mgr_disable_fdb_learning(b_pvt)) {
+		spin_lock(&br_mgr_ctx.lock);
+		b_pvt->bond_slave_num = 1;
+		spin_unlock(&br_mgr_ctx.lock);
+
+		return NOTIFY_DONE;
+	}
+
+cleanup:
+
+	for_each_netdev(&init_net, slave) {
+		if (netdev_master_upper_dev_get(slave) != bond_master) {
+			continue;
+		}
+
+		if (nss_bridge_mgr_del_bond_slave(bond_master, slave, b_pvt)) {
+			nss_bridge_mgr_warn("%px: Failed to remove slave (%s) from Bridge\n", b_pvt, slave->name);
+		}
+	}
+
+	return NOTIFY_BAD;
+}
+
+/*
+ * nss_bridge_mgr_bond_master_leave()
+ *	Remove a bond interface from bridge
+ */
+static int nss_bridge_mgr_bond_master_leave(struct net_device *bond_master,
+		struct nss_bridge_pvt *b_pvt)
+{
+	struct net_device *slave;
+
+	nss_bridge_mgr_assert(b_pvt->bond_slave_num == 0);
+
+	ASSERT_RTNL();
+
+	synchronize_rcu();
+
+	/*
+	 * Remove each of the bonded slaves from the VSI group
+	 */
+	for_each_netdev(&init_net, slave) {
+		if (netdev_master_upper_dev_get(slave) != bond_master) {
+			continue;
+		}
+
+		if (nss_bridge_mgr_del_bond_slave(bond_master, slave, b_pvt)) {
+			nss_bridge_mgr_warn("%px: Failed to remove slave (%s) from Bridge\n", b_pvt, slave->name);
+			goto cleanup;
+		}
+	}
+
+	/*
+	 * If more than one bond device is attached to the bridge,
+	 * only decrement bond_slave_num.
+	 */
+	spin_lock(&br_mgr_ctx.lock);
+	if (b_pvt->bond_slave_num > 1) {
+		b_pvt->bond_slave_num--;
+		spin_unlock(&br_mgr_ctx.lock);
+		return NOTIFY_DONE;
+	}
+	spin_unlock(&br_mgr_ctx.lock);
+
+	/*
+	 * Once the last bond interface is removed from the bridge, we can
+	 * switch back to FDB learning mode.
+	 */
+	if (!nss_bridge_mgr_enable_fdb_learning(b_pvt)) {
+		spin_lock(&br_mgr_ctx.lock);
+		b_pvt->bond_slave_num = 0;
+		spin_unlock(&br_mgr_ctx.lock);
+
+		return NOTIFY_DONE;
+	}
+
+cleanup:
+	for_each_netdev(&init_net, slave) {
+		if (netdev_master_upper_dev_get(slave) != bond_master) {
+			continue;
+		}
+
+		if (nss_bridge_mgr_add_bond_slave(bond_master, slave, b_pvt)) {
+			nss_bridge_mgr_warn("%px: Failed to add slave (%s) state in Bridge\n", b_pvt, slave->name);
+		}
+	}
+
+	return NOTIFY_BAD;
+}
+
+/*
+ * nss_bridge_mgr_l2_exception_acl_enable()
+ *	Create ACL rule to enable L2 exception.
+ */
+static bool nss_bridge_mgr_l2_exception_acl_enable(void)
+{
+	sw_error_t error;
+	fal_acl_rule_t rule;
+
+	memset(&rule, 0, sizeof(rule));
+	error = fal_acl_list_creat(NSS_BRIDGE_MGR_ACL_DEV_ID, NSS_BRIDGE_MGR_ACL_LIST_ID,
+				NSS_BRIDGE_MGR_ACL_LIST_PRIORITY);
+	if (error != SW_OK) {
+		nss_bridge_mgr_warn("List creation failed with error = %d\n", error);
+		return false;
+	}
+
+	/*
+	 * Enable exception for fragmented packets.
+	 */
+	rule.rule_type = FAL_ACL_RULE_IP4;
+	rule.is_fragement_mask = 1;
+	rule.is_fragement_val = A_TRUE;
+	FAL_FIELD_FLG_SET(rule.field_flg, FAL_ACL_FIELD_L3_FRAGMENT);
+	FAL_ACTION_FLG_SET(rule.action_flg, FAL_ACL_ACTION_RDTCPU);
+
+	error = fal_acl_rule_add(NSS_BRIDGE_MGR_ACL_DEV_ID, NSS_BRIDGE_MGR_ACL_LIST_ID,
+				NSS_BRIDGE_MGR_ACL_FRAG_RULE_ID, NSS_BRIDGE_MGR_ACL_RULE_NR, &rule);
+	if (error != SW_OK) {
+		nss_bridge_mgr_warn("Could not add fragment acl rule, error = %d\n", error);
+		goto frag_fail;
+	}
+
+	/*
+	 * Enable exception for TCP FIN.
+	 */
+	memset(&rule, 0, sizeof(rule));
+
+	rule.rule_type = FAL_ACL_RULE_IP4;
+	rule.tcp_flag_val = 0x1 & 0x3f;
+	rule.tcp_flag_mask = 0x1 & 0x3f;
+	FAL_FIELD_FLG_SET(rule.field_flg, FAL_ACL_FIELD_TCP_FLAG);
+	FAL_ACTION_FLG_SET(rule.action_flg, FAL_ACL_ACTION_RDTCPU);
+
+	error = fal_acl_rule_add(NSS_BRIDGE_MGR_ACL_DEV_ID, NSS_BRIDGE_MGR_ACL_LIST_ID,
+				NSS_BRIDGE_MGR_ACL_FIN_RULE_ID, NSS_BRIDGE_MGR_ACL_RULE_NR, &rule);
+	if (error != SW_OK) {
+		nss_bridge_mgr_warn("Could not add TCP FIN rule, error = %d\n", error);
+		goto fin_fail;
+	}
+
+	/*
+	 * Enable exception for TCP SYN.
+	 */
+	memset(&rule, 0, sizeof(rule));
+
+	rule.rule_type = FAL_ACL_RULE_IP4;
+	rule.tcp_flag_val = 0x2 & 0x3f;
+	rule.tcp_flag_mask = 0x2 & 0x3f;
+	FAL_FIELD_FLG_SET(rule.field_flg, FAL_ACL_FIELD_TCP_FLAG);
+	FAL_ACTION_FLG_SET(rule.action_flg, FAL_ACL_ACTION_RDTCPU);
+
+	error = fal_acl_rule_add(NSS_BRIDGE_MGR_ACL_DEV_ID, NSS_BRIDGE_MGR_ACL_LIST_ID,
+				NSS_BRIDGE_MGR_ACL_SYN_RULE_ID, NSS_BRIDGE_MGR_ACL_RULE_NR, &rule);
+	if (error != SW_OK) {
+		nss_bridge_mgr_warn("Could not add TCP SYN rule, error = %d\n", error);
+		goto syn_fail;
+	}
+
+	/*
+	 * Enable exception for TCP RST.
+	 */
+	memset(&rule, 0, sizeof(rule));
+
+	rule.rule_type = FAL_ACL_RULE_IP4;
+	rule.tcp_flag_val = 0x4 & 0x3f;
+	rule.tcp_flag_mask = 0x4 & 0x3f;
+	FAL_FIELD_FLG_SET(rule.field_flg, FAL_ACL_FIELD_TCP_FLAG);
+	FAL_ACTION_FLG_SET(rule.action_flg, FAL_ACL_ACTION_RDTCPU);
+
+	error = fal_acl_rule_add(NSS_BRIDGE_MGR_ACL_DEV_ID, NSS_BRIDGE_MGR_ACL_LIST_ID,
+				NSS_BRIDGE_MGR_ACL_RST_RULE_ID, NSS_BRIDGE_MGR_ACL_RULE_NR, &rule);
+	if (error != SW_OK) {
+		nss_bridge_mgr_warn("Could not add TCP RST rule, error = %d\n", error);
+		goto rst_fail;
+	}
+
+	/*
+	 * Bind ACL list with service code
+	 */
+	error = fal_acl_list_bind(NSS_BRIDGE_MGR_ACL_DEV_ID, NSS_BRIDGE_MGR_ACL_LIST_ID,
+				FAL_ACL_DIREC_IN, FAL_ACL_BIND_SERVICE_CODE, NSS_PPE_SC_VLAN_FILTER_BYPASS);
+	if (error != SW_OK) {
+		nss_bridge_mgr_warn("Could not bind ACL list, error = %d\n", error);
+		goto bind_fail;
+	}
+
+	nss_bridge_mgr_info("Created ACL rule\n");
+	return true;
+
+bind_fail:
+	error = fal_acl_rule_delete(NSS_BRIDGE_MGR_ACL_DEV_ID, NSS_BRIDGE_MGR_ACL_LIST_ID,
+				NSS_BRIDGE_MGR_ACL_RST_RULE_ID, NSS_BRIDGE_MGR_ACL_RULE_NR);
+	if (error != SW_OK) {
+		nss_bridge_mgr_warn("TCP RST rule deletion failed, error %d\n", error);
+	}
+
+rst_fail:
+	error = fal_acl_rule_delete(NSS_BRIDGE_MGR_ACL_DEV_ID, NSS_BRIDGE_MGR_ACL_LIST_ID,
+				NSS_BRIDGE_MGR_ACL_SYN_RULE_ID, NSS_BRIDGE_MGR_ACL_RULE_NR);
+	if (error != SW_OK) {
+		nss_bridge_mgr_warn("TCP SYN rule deletion failed, error %d\n", error);
+	}
+
+syn_fail:
+	error = fal_acl_rule_delete(NSS_BRIDGE_MGR_ACL_DEV_ID, NSS_BRIDGE_MGR_ACL_LIST_ID,
+				NSS_BRIDGE_MGR_ACL_FIN_RULE_ID, NSS_BRIDGE_MGR_ACL_RULE_NR);
+	if (error != SW_OK) {
+		nss_bridge_mgr_warn("TCP FIN rule deletion failed, error %d\n", error);
+	}
+
+fin_fail:
+	error = fal_acl_rule_delete(NSS_BRIDGE_MGR_ACL_DEV_ID, NSS_BRIDGE_MGR_ACL_LIST_ID,
+				NSS_BRIDGE_MGR_ACL_FRAG_RULE_ID, NSS_BRIDGE_MGR_ACL_RULE_NR);
+	if (error != SW_OK) {
+		nss_bridge_mgr_warn("IP fragmentation rule deletion failed, error %d\n", error);
+	}
+
+frag_fail:
+	error = fal_acl_list_destroy(NSS_BRIDGE_MGR_ACL_DEV_ID, NSS_BRIDGE_MGR_ACL_LIST_ID);
+	if (error != SW_OK) {
+		nss_bridge_mgr_warn("ACL list destroy failed, error %d\n", error);
+	}
+
+	return false;
+}
+
+/*
+ * nss_bridge_mgr_l2_exception_acl_disable()
+ *	Destroy ACL list and rule created by the driver.
+ */
+static void nss_bridge_mgr_l2_exception_acl_disable(void)
+{
+	sw_error_t error;
+
+	error = fal_acl_rule_delete(NSS_BRIDGE_MGR_ACL_DEV_ID, NSS_BRIDGE_MGR_ACL_LIST_ID,
+				NSS_BRIDGE_MGR_ACL_SYN_RULE_ID, NSS_BRIDGE_MGR_ACL_RULE_NR);
+	if (error != SW_OK) {
+		nss_bridge_mgr_warn("TCP SYN rule deletion failed, error %d\n", error);
+	}
+
+	error = fal_acl_rule_delete(NSS_BRIDGE_MGR_ACL_DEV_ID, NSS_BRIDGE_MGR_ACL_LIST_ID,
+				NSS_BRIDGE_MGR_ACL_FIN_RULE_ID, NSS_BRIDGE_MGR_ACL_RULE_NR);
+	if (error != SW_OK) {
+		nss_bridge_mgr_warn("TCP FIN rule deletion failed, error %d\n", error);
+	}
+
+	error = fal_acl_rule_delete(NSS_BRIDGE_MGR_ACL_DEV_ID, NSS_BRIDGE_MGR_ACL_LIST_ID,
+				NSS_BRIDGE_MGR_ACL_RST_RULE_ID, NSS_BRIDGE_MGR_ACL_RULE_NR);
+	if (error != SW_OK) {
+		nss_bridge_mgr_warn("TCP RST rule deletion failed, error %d\n", error);
+	}
+
+	error = fal_acl_rule_delete(NSS_BRIDGE_MGR_ACL_DEV_ID, NSS_BRIDGE_MGR_ACL_LIST_ID,
+				NSS_BRIDGE_MGR_ACL_FRAG_RULE_ID, NSS_BRIDGE_MGR_ACL_RULE_NR);
+	if (error != SW_OK) {
+		nss_bridge_mgr_warn("IP fragmentation rule deletion failed, error %d\n", error);
+	}
+
+	error = fal_acl_list_destroy(NSS_BRIDGE_MGR_ACL_DEV_ID, NSS_BRIDGE_MGR_ACL_LIST_ID);
+	if (error != SW_OK) {
+		nss_bridge_mgr_warn("ACL list destroy failed, error %d\n", error);
+	}
+}
+
+#endif
+
+/*
+ * nss_bridge_mgr_join_bridge()
+ *	Netdevice joins the bridge; send a netdevice-join message to the NSS FW.
+ */
+int nss_bridge_mgr_join_bridge(struct net_device *dev, struct nss_bridge_pvt *br)
+{
+	int32_t ifnum;
+
+	ifnum = nss_cmn_get_interface_number_by_dev(dev);
+	if (ifnum < 0) {
+		nss_bridge_mgr_warn("%s: failed to find interface number\n", dev->name);
+		return -EINVAL;
+	}
+
+#if defined(NSS_BRIDGE_MGR_PPE_SUPPORT)
+	if (NSS_BRIDGE_MGR_IF_IS_TYPE_PHYSICAL(ifnum)) {
+		fal_port_t port_num = (fal_port_t)ifnum;
+
+		/*
+		 * If there is a wan interface added in bridge, create a
+		 * separate VSI for it, hence avoiding FDB based forwarding.
+		 * This is done by not sending join message to the bridge in NSS.
+		 */
+		if (br_mgr_ctx.wan_if_num == ifnum) {
+			br->wan_if_enabled = true;
+			br->wan_if_num = ifnum;
+			nss_bridge_mgr_info("if_num %d is added as WAN interface \n", ifnum);
+			return 0;
+		}
+
+		if (ppe_port_vsi_get(NSS_BRIDGE_MGR_SWITCH_ID, port_num, &br->port_vsi[port_num - 1])) {
+			nss_bridge_mgr_warn("%px: failed to save port VSI of physical interface\n", br);
+			return -EIO;
+		}
+
+		if (ppe_port_vsi_set(NSS_BRIDGE_MGR_SWITCH_ID, port_num, br->vsi)) {
+			nss_bridge_mgr_warn("%px: failed to set bridge VSI for physical interface\n", br);
+			return -EIO;
+		}
+	} else if (is_vlan_dev(dev)) {
+		struct net_device *real_dev;
+
+		/*
+		 * Find real_dev associated with the VLAN
+		 */
+		real_dev = nss_vlan_mgr_get_real_dev(dev);
+		if (real_dev && is_vlan_dev(real_dev))
+			real_dev = nss_vlan_mgr_get_real_dev(real_dev);
+		if (real_dev == NULL) {
+			nss_bridge_mgr_warn("%px: real dev for the vlan: %s in NULL\n", br, dev->name);
+			return -EINVAL;
+		}
+
+		/*
+		 * This is a valid vlan dev, add the vlan dev to bridge
+		 */
+		if (nss_vlan_mgr_join_bridge(dev, br->vsi)) {
+			nss_bridge_mgr_warn("%px: vlan device failed to join bridge\n", br);
+			return -ENODEV;
+		}
+
+		/*
+		 * dev is a bond with VLAN and VLAN is added to bridge
+		 */
+		if (netif_is_bond_master(real_dev)) {
+			if (nss_bridge_tx_join_msg(br->ifnum, dev) != NSS_TX_SUCCESS) {
+				nss_bridge_mgr_warn("%px: Interface %s join bridge failed\n", br, dev->name);
+				nss_vlan_mgr_leave_bridge(dev, br->vsi);
+				return -ENOENT;
+			}
+
+			/*
+			 * Add the bond_master to bridge.
+			 */
+			if (nss_bridge_mgr_bond_master_join(real_dev, br) != NOTIFY_DONE) {
+				nss_bridge_mgr_warn("%px: Slaves of bond interface %s join bridge failed\n", br, real_dev->name);
+				nss_bridge_tx_leave_msg(br->ifnum, dev);
+				nss_vlan_mgr_leave_bridge(dev, br->vsi);
+				return -EINVAL;
+			}
+
+			return 0;
+		}
+	}
+#endif
+
+	if (nss_bridge_tx_join_msg(br->ifnum, dev) != NSS_TX_SUCCESS) {
+		nss_bridge_mgr_warn("%px: Interface %s join bridge failed\n", br, dev->name);
+#if defined(NSS_BRIDGE_MGR_PPE_SUPPORT)
+		if (NSS_BRIDGE_MGR_IF_IS_TYPE_PHYSICAL(ifnum)) {
+			fal_port_t port_num = (fal_port_t)ifnum;
+			ppe_port_vsi_set(NSS_BRIDGE_MGR_SWITCH_ID, port_num, br->port_vsi[port_num - 1]);
+		}
+#endif
+		return -EIO;
+	}
+
+	return 0;
+}
+
+/*
+ * nss_bridge_mgr_leave_bridge()
+ *	Netdevice leaves the bridge; send a netdevice-leave message to the NSS FW.
+ */
+int nss_bridge_mgr_leave_bridge(struct net_device *dev, struct nss_bridge_pvt *br)
+{
+	int32_t ifnum;
+
+	ifnum = nss_cmn_get_interface_number_by_dev(dev);
+	if (ifnum < 0) {
+		nss_bridge_mgr_warn("%s: failed to find interface number\n", dev->name);
+		return -1;
+	}
+
+#if defined(NSS_BRIDGE_MGR_PPE_SUPPORT)
+	if (NSS_BRIDGE_MGR_IF_IS_TYPE_PHYSICAL(ifnum)) {
+		fal_port_t port_num = (fal_port_t)ifnum;
+
+		if (fal_stp_port_state_set(NSS_BRIDGE_MGR_SWITCH_ID, NSS_BRIDGE_MGR_SPANNING_TREE_ID, port_num, FAL_STP_FORWARDING)) {
+			nss_bridge_mgr_warn("%px: faied to set the STP state to forwarding\n", br);
+			return -1;
+		}
+
+		/*
+		 * If there is a wan interface added in bridge, a separate
+		 * VSI is created for it by not sending join message to NSS.
+		 * Hence a leave message should also be avoided.
+		 */
+		if ((br->wan_if_enabled) && (br->wan_if_num == ifnum)) {
+			br->wan_if_enabled = false;
+			br->wan_if_num = -1;
+			nss_bridge_mgr_info("if_num %d is added as WAN interface\n", ifnum);
+			return 0;
+		}
+
+		if (ppe_port_vsi_set(NSS_BRIDGE_MGR_SWITCH_ID, port_num, br->port_vsi[port_num - 1])) {
+			nss_bridge_mgr_warn("%px: failed to restore port VSI of physical interface\n", br);
+			fal_stp_port_state_set(NSS_BRIDGE_MGR_SWITCH_ID, NSS_BRIDGE_MGR_SPANNING_TREE_ID, port_num, FAL_STP_DISABLED);
+			return -1;
+		}
+	} else if (is_vlan_dev(dev)) {
+		struct net_device *real_dev;
+
+		/*
+		 * Find real_dev associated with the VLAN.
+		 */
+		real_dev = nss_vlan_mgr_get_real_dev(dev);
+		if (real_dev && is_vlan_dev(real_dev))
+			real_dev = nss_vlan_mgr_get_real_dev(real_dev);
+		if (real_dev == NULL) {
+			nss_bridge_mgr_warn("%px: real dev for the vlan: %s in NULL\n", br, dev->name);
+			return -1;
+		}
+
+		/*
+		 * This is a valid vlan dev, remove the vlan dev from bridge.
+		 */
+		if (nss_vlan_mgr_leave_bridge(dev, br->vsi)) {
+			nss_bridge_mgr_warn("%px: vlan device failed to leave bridge\n", br);
+			return -1;
+		}
+
+		/*
+		 * dev is a bond with VLAN and VLAN is removed from bridge
+		 */
+		if (netif_is_bond_master(real_dev)) {
+			if (nss_bridge_tx_leave_msg(br->ifnum, dev) != NSS_TX_SUCCESS) {
+				nss_bridge_mgr_warn("%px: Interface %s leave bridge failed\n", br, dev->name);
+				nss_vlan_mgr_join_bridge(dev, br->vsi);
+				nss_bridge_tx_join_msg(br->ifnum, dev);
+				return -1;
+			}
+
+			/*
+			 * Remove the bond_master from bridge.
+			 */
+			if (nss_bridge_mgr_bond_master_leave(real_dev, br) != NOTIFY_DONE) {
+				nss_bridge_mgr_warn("%px: Slaves of bond interface %s leave bridge failed\n", br, real_dev->name);
+				nss_vlan_mgr_join_bridge(dev, br->vsi);
+				nss_bridge_tx_join_msg(br->ifnum, dev);
+				return -1;
+			}
+
+			return 0;
+		}
+	}
+#endif
+
+	if (nss_bridge_tx_leave_msg(br->ifnum, dev) != NSS_TX_SUCCESS) {
+		nss_bridge_mgr_warn("%px: Interface %s leave bridge failed\n", br, dev->name);
+#if defined(NSS_BRIDGE_MGR_PPE_SUPPORT)
+		if (is_vlan_dev(dev)) {
+			nss_vlan_mgr_join_bridge(dev, br->vsi);
+			nss_bridge_tx_join_msg(br->ifnum, dev);
+		} else if (NSS_BRIDGE_MGR_IF_IS_TYPE_PHYSICAL(ifnum)) {
+			fal_port_t port_num = (fal_port_t)ifnum;
+
+			fal_stp_port_state_set(NSS_BRIDGE_MGR_SWITCH_ID, NSS_BRIDGE_MGR_SPANNING_TREE_ID, port_num, FAL_STP_DISABLED);
+			ppe_port_vsi_set(NSS_BRIDGE_MGR_SWITCH_ID, port_num, br->vsi);
+		}
+#endif
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * nss_bridge_mgr_unregister_br()
+ *	Unregister bridge device, dev, from bridge manager database.
+ */
+int nss_bridge_mgr_unregister_br(struct net_device *dev)
+{
+	struct nss_bridge_pvt *b_pvt;
+
+	/*
+	 * Do we have it on record?
+	 */
+	b_pvt = nss_bridge_mgr_find_instance(dev);
+	if (!b_pvt)
+		return -1;
+
+	/*
+	 * sequence of free:
+	 * 1. issue VSI unassign to NSS
+	 * 2. free VSI
+	 * 3. flush bridge FDB table
+	 * 4. unregister bridge netdevice from data plane
+	 * 5. deallocate dynamic interface associated with bridge netdevice
+	 * 6. free bridge netdevice
+	 */
+#if defined(NSS_BRIDGE_MGR_PPE_SUPPORT)
+	/*
+	 * VSI unassign function in NSS firmware only returns
+	 * CNODE_SEND_NACK in the beginning of the function when it
+	 * detects that bridge VSI is not assigned for the bridge.
+	 * Please refer to the function bridge_configure_vsi_unassign
+	 * in NSS firmware for detailed operation.
+	 */
+	if (nss_bridge_tx_vsi_unassign_msg(b_pvt->ifnum, b_pvt->vsi) != NSS_TX_SUCCESS)
+		nss_bridge_mgr_warn("%px: failed to unassign vsi\n", b_pvt);
+
+	ppe_vsi_free(NSS_BRIDGE_MGR_SWITCH_ID, b_pvt->vsi);
+
+	/*
+	 * It may happen that the same VSI is allocated again,
+	 * so there is a need to flush bridge FDB table.
+	 */
+	if (fal_fdb_entry_del_byfid(NSS_BRIDGE_MGR_SWITCH_ID, b_pvt->vsi, FAL_FDB_DEL_STATIC)) {
+		nss_bridge_mgr_warn("%px: Failed to flush FDB table for vsi:%d in PPE\n", b_pvt, b_pvt->vsi);
+	}
+#endif
+
+	nss_bridge_mgr_trace("%px: Bridge %s unregsitered. Freeing bridge di %d\n", b_pvt, dev->name, b_pvt->ifnum);
+
+	nss_bridge_unregister(b_pvt->ifnum);
+
+	if (nss_dynamic_interface_dealloc_node(b_pvt->ifnum, NSS_DYNAMIC_INTERFACE_TYPE_BRIDGE) != NSS_TX_SUCCESS) {
+		nss_bridge_mgr_warn("%px: dealloc bridge di failed\n", b_pvt);
+	}
+
+	nss_bridge_mgr_delete_instance(b_pvt);
+	return 0;
+}
+
+/*
+ * nss_bridge_mgr_register_br()
+ *	Register new bridge instance in bridge manager database.
+ */
+int nss_bridge_mgr_register_br(struct net_device *dev)
+{
+	struct nss_bridge_pvt *b_pvt;
+	int ifnum;
+	int err;
+#if defined(NSS_BRIDGE_MGR_PPE_SUPPORT)
+	uint32_t vsi_id = 0;
+#endif
+
+	nss_bridge_mgr_info("%px: Bridge register: %s\n", dev, dev->name);
+
+	b_pvt = nss_bridge_mgr_create_instance(dev);
+	if (!b_pvt)
+		return -EINVAL;
+
+	b_pvt->dev = dev;
+
+	ifnum = nss_dynamic_interface_alloc_node(NSS_DYNAMIC_INTERFACE_TYPE_BRIDGE);
+	if (ifnum < 0) {
+		nss_bridge_mgr_warn("%px: failed to alloc bridge di\n", b_pvt);
+		nss_bridge_mgr_delete_instance(b_pvt);
+		return -EFAULT;
+	}
+
+	if (!nss_bridge_register(ifnum, dev, NULL, NULL, 0, b_pvt)) {
+		nss_bridge_mgr_warn("%px: failed to register bridge di to NSS\n", b_pvt);
+		goto fail;
+	}
+
+#if defined(NSS_BRIDGE_MGR_PPE_SUPPORT)
+	err = ppe_vsi_alloc(NSS_BRIDGE_MGR_SWITCH_ID, &vsi_id);
+	if (err) {
+		nss_bridge_mgr_warn("%px: failed to alloc bridge vsi, error = %d\n", b_pvt, err);
+		goto fail_1;
+	}
+
+	b_pvt->vsi = vsi_id;
+
+	err = nss_bridge_tx_vsi_assign_msg(ifnum, vsi_id);
+	if (err != NSS_TX_SUCCESS) {
+		nss_bridge_mgr_warn("%px: failed to assign vsi msg, error = %d\n", b_pvt, err);
+		goto fail_2;
+	}
+#endif
+
+	err = nss_bridge_tx_set_mac_addr_msg(ifnum, dev->dev_addr);
+	if (err != NSS_TX_SUCCESS) {
+		nss_bridge_mgr_warn("%px: failed to set mac_addr msg, error = %d\n", b_pvt, err);
+		goto fail_3;
+	}
+
+	err = nss_bridge_tx_set_mtu_msg(ifnum, dev->mtu);
+	if (err != NSS_TX_SUCCESS) {
+		nss_bridge_mgr_warn("%px: failed to set mtu msg, error = %d\n", b_pvt, err);
+		goto fail_3;
+	}
+
+	/*
+	 * All done, take a snapshot of the current MTU and MAC address
+	 */
+	b_pvt->ifnum = ifnum;
+	b_pvt->mtu = dev->mtu;
+	b_pvt->wan_if_num = -1;
+	b_pvt->wan_if_enabled = false;
+	ether_addr_copy(b_pvt->dev_addr, dev->dev_addr);
+	spin_lock(&br_mgr_ctx.lock);
+	list_add(&b_pvt->list, &br_mgr_ctx.list);
+	spin_unlock(&br_mgr_ctx.lock);
+
+#if defined(NSS_BRIDGE_MGR_PPE_SUPPORT)
+	/*
+	 * Disable FDB learning if OVS is enabled for
+	 * all bridges (including Linux bridge).
+	 */
+	if (ovs_enabled) {
+		nss_bridge_mgr_disable_fdb_learning(b_pvt);
+	}
+#endif
+	return 0;
+
+fail_3:
+#if defined(NSS_BRIDGE_MGR_PPE_SUPPORT)
+	if (nss_bridge_tx_vsi_unassign_msg(ifnum, vsi_id) != NSS_TX_SUCCESS) {
+		nss_bridge_mgr_warn("%px: failed to unassign vsi\n", b_pvt);
+	}
+
+fail_2:
+	ppe_vsi_free(NSS_BRIDGE_MGR_SWITCH_ID, vsi_id);
+
+fail_1:
+#endif
+	nss_bridge_unregister(ifnum);
+
+fail:
+	if (nss_dynamic_interface_dealloc_node(ifnum, NSS_DYNAMIC_INTERFACE_TYPE_BRIDGE) != NSS_TX_SUCCESS) {
+		nss_bridge_mgr_warn("%px: failed to dealloc bridge di\n", b_pvt);
+	}
+
+	nss_bridge_mgr_delete_instance(b_pvt);
+	return -EFAULT;
+}
+
+/*
+ * nss_bridge_mgr_bond_slave_changeupper()
+ *	Add bond slave to bridge VSI
+ */
+static int nss_bridge_mgr_bond_slave_changeupper(struct netdev_notifier_changeupper_info *cu_info,
+		struct net_device *bond_slave)
+{
+	struct net_device *master;
+	struct nss_bridge_pvt *b_pvt;
+
+	/*
+	 * Checking if our bond master is part of a bridge
+	 */
+	master = netdev_master_upper_dev_get(cu_info->upper_dev);
+	if (!master)
+		return NOTIFY_DONE;
+
+	b_pvt = nss_bridge_mgr_find_instance(master);
+	if (!b_pvt) {
+		nss_bridge_mgr_warn("The bond master is not part of Bridge dev:%s\n", master->name);
+		return NOTIFY_DONE;
+	}
+
+	/*
+	 * Add or remove the slave based on the linking event
+	 */
+	if (cu_info->linking) {
+		if (nss_bridge_mgr_add_bond_slave(cu_info->upper_dev, bond_slave, b_pvt)) {
+			nss_bridge_mgr_warn("%px: Failed to add slave (%s) state in Bridge %s\n", b_pvt,
+					cu_info->upper_dev->name, master->name);
+		}
+	} else {
+		if (nss_bridge_mgr_del_bond_slave(cu_info->upper_dev, bond_slave, b_pvt)) {
+			nss_bridge_mgr_warn("%px: Failed to remove slave (%s) state in Bridge %s\n", b_pvt,
+					cu_info->upper_dev->name, master->name);
+		}
+	}
+
+	return NOTIFY_DONE;
+}
+
+/*
+ * nss_bridge_mgr_changemtu_event()
+ *	Change bridge MTU and send change bridge MTU message to NSS FW.
+ */
+static int nss_bridge_mgr_changemtu_event(struct netdev_notifier_info *info)
+{
+	struct net_device *dev = netdev_notifier_info_to_dev(info);
+	struct nss_bridge_pvt *b_pvt = nss_bridge_mgr_find_instance(dev);
+
+	if (!b_pvt)
+		return NOTIFY_DONE;
+
+	spin_lock(&br_mgr_ctx.lock);
+	if (b_pvt->mtu == dev->mtu) {
+		spin_unlock(&br_mgr_ctx.lock);
+		return NOTIFY_DONE;
+	}
+	spin_unlock(&br_mgr_ctx.lock);
+
+	nss_bridge_mgr_trace("%px: MTU changed to %d, send message to NSS\n", b_pvt, dev->mtu);
+
+	if (nss_bridge_tx_set_mtu_msg(b_pvt->ifnum, dev->mtu) != NSS_TX_SUCCESS) {
+		nss_bridge_mgr_warn("%px: Failed to send change MTU message to NSS\n", b_pvt);
+		return NOTIFY_DONE;
+	}
+
+	spin_lock(&br_mgr_ctx.lock);
+	b_pvt->mtu = dev->mtu;
+	spin_unlock(&br_mgr_ctx.lock);
+
+	return NOTIFY_DONE;
+}
+
+/*
+ * nss_bridge_mgr_changeaddr_event()
+ *	Change bridge MAC address and send change bridge address message to NSS FW.
+ */
+static int nss_bridge_mgr_changeaddr_event(struct netdev_notifier_info *info)
+{
+	struct net_device *dev = netdev_notifier_info_to_dev(info);
+	struct nss_bridge_pvt *b_pvt = nss_bridge_mgr_find_instance(dev);
+
+	if (!b_pvt)
+		return NOTIFY_DONE;
+
+	spin_lock(&br_mgr_ctx.lock);
+	if (!memcmp(b_pvt->dev_addr, dev->dev_addr, ETH_ALEN)) {
+		spin_unlock(&br_mgr_ctx.lock);
+		nss_bridge_mgr_trace("%px: MAC are the same..skip processing it\n", b_pvt);
+		return NOTIFY_DONE;
+	}
+	spin_unlock(&br_mgr_ctx.lock);
+
+	nss_bridge_mgr_trace("%px: MAC changed to %pM, update NSS\n", b_pvt, dev->dev_addr);
+
+	if (nss_bridge_tx_set_mac_addr_msg(b_pvt->ifnum, dev->dev_addr) != NSS_TX_SUCCESS) {
+		nss_bridge_mgr_warn("%px: Failed to send change MAC address message to NSS\n", b_pvt);
+		return NOTIFY_DONE;
+	}
+
+	spin_lock(&br_mgr_ctx.lock);
+	ether_addr_copy(b_pvt->dev_addr, dev->dev_addr);
+	spin_unlock(&br_mgr_ctx.lock);
+
+	return NOTIFY_DONE;
+}
+
+/*
+ * nss_bridge_mgr_changeupper_event()
+ *	Bridge manager handles netdevice joining or leaving bridge notification.
+ */
+static int nss_bridge_mgr_changeupper_event(struct netdev_notifier_info *info)
+{
+	struct net_device *dev = netdev_notifier_info_to_dev(info);
+	struct net_device *master_dev;
+	struct netdev_notifier_changeupper_info *cu_info;
+	struct nss_bridge_pvt *b_pvt;
+
+	cu_info = (struct netdev_notifier_changeupper_info *)info;
+
+	/*
+	 * Check if the master pointer is valid
+	 */
+	if (!cu_info->master)
+		return NOTIFY_DONE;
+
+	/*
+	 * If dev is a bond slave, its master is a bond that we don't process
+	 * directly, but the bond itself might be part of a bridge.
+	 */
+	if (netif_is_bond_slave(dev))
+		return nss_bridge_mgr_bond_slave_changeupper(cu_info, dev);
+
+	master_dev = cu_info->upper_dev;
+
+	/*
+	 * Check if upper_dev is a known bridge.
+	 */
+	b_pvt = nss_bridge_mgr_find_instance(master_dev);
+	if (!b_pvt)
+		return NOTIFY_DONE;
+
+	/*
+	 * The device is a bond master being added to or removed from the bridge
+	 */
+	if (netif_is_bond_master(dev)) {
+#if defined(NSS_BRIDGE_MGR_PPE_SUPPORT)
+		if (cu_info->linking)
+			return nss_bridge_mgr_bond_master_join(dev, b_pvt);
+		else
+			return nss_bridge_mgr_bond_master_leave(dev, b_pvt);
+#endif
+	}
+
+	if (cu_info->linking) {
+		nss_bridge_mgr_trace("%px: Interface %s joining bridge %s\n", b_pvt, dev->name, master_dev->name);
+		if (nss_bridge_mgr_join_bridge(dev, b_pvt)) {
+			nss_bridge_mgr_warn("%px: Interface %s failed to join bridge %s\n", b_pvt, dev->name, master_dev->name);
+		}
+
+		return NOTIFY_DONE;
+	}
+
+	nss_bridge_mgr_trace("%px: Interface %s leaving bridge %s\n", b_pvt, dev->name, master_dev->name);
+	if (nss_bridge_mgr_leave_bridge(dev, b_pvt)) {
+		nss_bridge_mgr_warn("%px: Interface %s failed to leave bridge %s\n", b_pvt, dev->name, master_dev->name);
+	}
+
+	return NOTIFY_DONE;
+}
+
+/*
+ * nss_bridge_mgr_register_event()
+ *	Bridge manager handles bridge registration notification.
+ */
+static int nss_bridge_mgr_register_event(struct netdev_notifier_info *info)
+{
+	nss_bridge_mgr_register_br(netdev_notifier_info_to_dev(info));
+	return NOTIFY_DONE;
+}
+
+/*
+ * nss_bridge_mgr_unregister_event()
+ *	Bridge manager handles bridge unregistration notification.
+ */
+static int nss_bridge_mgr_unregister_event(struct netdev_notifier_info *info)
+{
+	nss_bridge_mgr_unregister_br(netdev_notifier_info_to_dev(info));
+	return NOTIFY_DONE;
+}
+
+/*
+ * nss_bridge_mgr_netdevice_event()
+ *	Bridge manager handles bridge operation notifications.
+ */
+static int nss_bridge_mgr_netdevice_event(struct notifier_block *unused,
+				unsigned long event, void *ptr)
+{
+	struct netdev_notifier_info *info = (struct netdev_notifier_info *)ptr;
+
+	switch (event) {
+	case NETDEV_CHANGEUPPER:
+		return nss_bridge_mgr_changeupper_event(info);
+	case NETDEV_CHANGEADDR:
+		return nss_bridge_mgr_changeaddr_event(info);
+	case NETDEV_CHANGEMTU:
+		return nss_bridge_mgr_changemtu_event(info);
+	case NETDEV_REGISTER:
+		return nss_bridge_mgr_register_event(info);
+	case NETDEV_UNREGISTER:
+		return nss_bridge_mgr_unregister_event(info);
+	}
+
+	/*
+	 * Notify done for all the events we don't care about
+	 */
+	return NOTIFY_DONE;
+}
+
+static struct notifier_block nss_bridge_mgr_netdevice_nb __read_mostly = {
+	.notifier_call = nss_bridge_mgr_netdevice_event,
+};
+
+#if defined(NSS_BRIDGE_MGR_PPE_SUPPORT)
+/*
+ * nss_bridge_mgr_is_physical_dev()
+ *	Check if the device is, or is stacked on, a physical device.
+ */
+static bool nss_bridge_mgr_is_physical_dev(struct net_device *dev)
+{
+	struct net_device *root_dev = dev;
+	uint32_t ifnum;
+
+	if (!dev)
+		return false;
+
+	/*
+	 * Check for VLAN first because a VLAN can be configured over a bond
+	 * interface. However, a bond over a VLAN is not supported in our driver.
+	 */
+	if (is_vlan_dev(dev)) {
+		root_dev = nss_vlan_mgr_get_real_dev(dev);
+		if (!root_dev)
+			goto error;
+
+		if (is_vlan_dev(root_dev))
+			root_dev = nss_vlan_mgr_get_real_dev(root_dev);
+
+		if (!root_dev)
+			goto error;
+	}
+
+	/*
+	 * Don't consider bond interface because FDB learning is disabled.
+	 */
+	if (netif_is_bond_master(root_dev))
+		return false;
+
+	ifnum = nss_cmn_get_interface_number_by_dev(root_dev);
+	if (!NSS_BRIDGE_MGR_IF_IS_TYPE_PHYSICAL(ifnum)) {
+		nss_bridge_mgr_warn("%px: interface %s is not physical interface\n",
+				root_dev, root_dev->name);
+		return false;
+	}
+
+	return true;
+
+error:
+	nss_bridge_mgr_warn("%px: cannot find the real device for VLAN %s\n", dev, dev->name);
+	return false;
+}
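+
+/*
+ * Illustrative note only (interface names are hypothetical): with eth0 a
+ * PPE physical port and bond0 a bond master, the stacking above resolves as:
+ *
+ *	eth0      - physical port, returns true
+ *	eth0.100  - VLAN over physical, resolves to eth0, returns true
+ *	bond0.100 - VLAN over bond, resolves to bond0, returns false (FDB
+ *	            learning is disabled on bonds)
+ */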
+
+/*
+ * nss_bridge_mgr_fdb_update_callback()
+ *	Get invoked when there is a FDB update.
+ */
+static int nss_bridge_mgr_fdb_update_callback(struct notifier_block *notifier,
+					      unsigned long val, void *ctx)
+{
+	struct br_fdb_event *event = (struct br_fdb_event *)ctx;
+	struct nss_bridge_pvt *b_pvt = NULL;
+	struct net_device *br_dev = NULL;
+	fal_fdb_entry_t entry;
+
+	if (!event->br)
+		return NOTIFY_DONE;
+
+	br_dev = br_fdb_bridge_dev_get_and_hold(event->br);
+	if (!br_dev) {
+		nss_bridge_mgr_warn("%px: bridge device not found\n", event->br);
+		return NOTIFY_DONE;
+	}
+
+	nss_bridge_mgr_trace("%px: MAC: %pM, original source: %s, new source: %s, bridge: %s\n",
+			event, event->addr, event->orig_dev->name, event->dev->name, br_dev->name);
+
+	/*
+	 * When a MAC address move from a physical interface to a non-physical
+	 * interface, the FDB entry in the PPE needs to be flushed.
+	 */
+	if (!nss_bridge_mgr_is_physical_dev(event->orig_dev)) {
+		nss_bridge_mgr_trace("%px: original source is not a physical interface\n", event->orig_dev);
+		dev_put(br_dev);
+		return NOTIFY_DONE;
+	}
+
+	if (nss_bridge_mgr_is_physical_dev(event->dev)) {
+		nss_bridge_mgr_trace("%px: new source is not a non-physical interface\n", event->dev);
+		dev_put(br_dev);
+		return NOTIFY_DONE;
+	}
+
+	b_pvt = nss_bridge_mgr_find_instance(br_dev);
+	dev_put(br_dev);
+	if (!b_pvt) {
+		nss_bridge_mgr_warn("%px: bridge instance not found\n", event->br);
+		return NOTIFY_DONE;
+	}
+
+	memset(&entry, 0, sizeof(entry));
+	memcpy(&entry.addr, event->addr, ETH_ALEN);
+	entry.fid = b_pvt->vsi;
+	if (SW_OK != fal_fdb_entry_del_bymac(NSS_BRIDGE_MGR_SWITCH_ID, &entry)) {
+		nss_bridge_mgr_warn("%px: FDB entry delete failed with MAC %pM and fid %d\n",
+				    b_pvt, &entry.addr, entry.fid);
+		return NOTIFY_DONE;
+	}
+	return NOTIFY_OK;
+}
+
+/*
+ * Notifier block for FDB update
+ */
+static struct notifier_block nss_bridge_mgr_fdb_update_notifier = {
+	.notifier_call = nss_bridge_mgr_fdb_update_callback,
+};
+
+/*
+ * nss_bridge_mgr_wan_intf_add_handler()
+ *	Marks an interface as a WAN interface for special handling by bridge.
+ */
+static int nss_bridge_mgr_wan_intf_add_handler(struct ctl_table *table,
+						int write, void __user *buffer,
+						size_t *lenp, loff_t *ppos)
+{
+	struct net_device *dev;
+	char *dev_name;
+	char *if_name;
+	int32_t if_num;
+	int ret;
+
+	/*
+	 * Find the string, return an error if not found
+	 */
+	ret = proc_dostring(table, write, buffer, lenp, ppos);
+	if (ret || !write) {
+		return ret;
+	}
+
+	if_name = br_mgr_ctx.wan_ifname;
+	dev_name = strsep(&if_name, " ");
+	dev = dev_get_by_name(&init_net, dev_name);
+	if (!dev) {
+		nss_bridge_mgr_warn("Cannot find the net device associated with %s\n", dev_name);
+		return -ENODEV;
+	}
+
+	if_num = nss_cmn_get_interface_number_by_dev(dev);
+	if (!NSS_BRIDGE_MGR_IF_IS_TYPE_PHYSICAL(if_num)) {
+		dev_put(dev);
+		nss_bridge_mgr_warn("Only physical interfaces can be marked as WAN interface: if_num %d\n", if_num);
+		return -ENOMSG;
+	}
+
+	if (br_mgr_ctx.wan_if_num != -1) {
+		dev_put(dev);
+		nss_bridge_mgr_warn("Cannot overwrite a pre-existing wan interface\n");
+		return -ENOMSG;
+	}
+
+	br_mgr_ctx.wan_if_num = if_num;
+	dev_put(dev);
+	nss_bridge_mgr_always("For adding if_num: %d as WAN interface, do a network restart\n", if_num);
+	return ret;
+}
+
+/*
+ * nss_bridge_mgr_wan_intf_del_handler()
+ *	Un-marks an interface as a WAN interface.
+ */
+static int nss_bridge_mgr_wan_intf_del_handler(struct ctl_table *table,
+						int write, void __user *buffer,
+						size_t *lenp, loff_t *ppos)
+{
+	struct net_device *dev;
+	char *dev_name;
+	char *if_name;
+	int32_t if_num;
+	int ret;
+
+	ret = proc_dostring(table, write, buffer, lenp, ppos);
+	if (ret)
+		return ret;
+
+	if (!write)
+		return ret;
+
+	if_name = br_mgr_ctx.wan_ifname;
+	dev_name = strsep(&if_name, " ");
+	dev = dev_get_by_name(&init_net, dev_name);
+	if (!dev) {
+		nss_bridge_mgr_warn("Cannot find the net device associated with %s\n", dev_name);
+		return -ENODEV;
+	}
+
+	if_num = nss_cmn_get_interface_number_by_dev(dev);
+	if (!NSS_BRIDGE_MGR_IF_IS_TYPE_PHYSICAL(if_num)) {
+		dev_put(dev);
+		nss_bridge_mgr_warn("Only physical interfaces can be marked/unmarked, if_num: %d\n", if_num);
+		return -ENOMSG;
+	}
+
+	if (br_mgr_ctx.wan_if_num != if_num) {
+		dev_put(dev);
+		nss_bridge_mgr_warn("This interface is not marked as a WAN interface\n");
+		return -ENOMSG;
+	}
+
+	br_mgr_ctx.wan_if_num = -1;
+	dev_put(dev);
+	nss_bridge_mgr_always("For deleting if_num: %d as WAN interface, do a network restart\n", if_num);
+	return ret;
+}
+
+static struct ctl_table nss_bridge_mgr_table[] = {
+	{
+		.procname	= "add_wanif",
+		.data           = &br_mgr_ctx.wan_ifname,
+		.maxlen         = sizeof(char) * IFNAMSIZ,
+		.mode           = 0644,
+		.proc_handler   = &nss_bridge_mgr_wan_intf_add_handler,
+	},
+	{
+		.procname	= "del_wanif",
+		.data           = &br_mgr_ctx.wan_ifname,
+		.maxlen         = sizeof(char) * IFNAMSIZ,
+		.mode           = 0644,
+		.proc_handler   = &nss_bridge_mgr_wan_intf_del_handler,
+	},
+	{ }
+};
+
+static struct ctl_table nss_bridge_mgr_dir[] = {
+	{
+		.procname	= "bridge_mgr",
+		.mode		= 0555,
+		.child		= nss_bridge_mgr_table,
+	},
+	{ }
+};
+
+static struct ctl_table nss_bridge_mgr_root_dir[] = {
+	{
+		.procname	= "nss",
+		.mode		= 0555,
+		.child		= nss_bridge_mgr_dir,
+	},
+	{ }
+};
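+
+/*
+ * Illustrative usage only (the interface name is hypothetical): the tables
+ * above register under /proc/sys/nss/bridge_mgr/, so a physical interface
+ * can be marked or unmarked as the WAN interface from user space:
+ *
+ *	echo "eth0" > /proc/sys/nss/bridge_mgr/add_wanif
+ *	echo "eth0" > /proc/sys/nss/bridge_mgr/del_wanif
+ *
+ * A network restart is needed for the change to take effect.
+ */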
+#endif
+
+/*
+ * nss_bridge_mgr_init_module()
+ *	bridge_mgr module init function
+ */
+int __init nss_bridge_mgr_init_module(void)
+{
+	/*
+	 * Monitor bridge activity only on supported platforms
+	 */
+	if (!of_machine_is_compatible("qcom,ipq807x") && !of_machine_is_compatible("qcom,ipq6018") && !of_machine_is_compatible("qcom,ipq8074"))
+		return 0;
+
+	INIT_LIST_HEAD(&br_mgr_ctx.list);
+	spin_lock_init(&br_mgr_ctx.lock);
+	register_netdevice_notifier(&nss_bridge_mgr_netdevice_nb);
+	nss_bridge_mgr_info("Module (Build %s) loaded\n", NSS_CLIENT_BUILD_ID);
+#if defined(NSS_BRIDGE_MGR_PPE_SUPPORT)
+	br_mgr_ctx.wan_if_num = -1;
+	br_fdb_update_register_notify(&nss_bridge_mgr_fdb_update_notifier);
+	br_mgr_ctx.nss_bridge_mgr_header = register_sysctl_table(nss_bridge_mgr_root_dir);
+
+	/*
+	 * Enable the ACL rule that enables L2 exceptions. This is needed if a
+	 * PPE virtual port (VP) is added to a bridge. VPs are assumed to use
+	 * flow-based bridging, so L2 exceptions must be enabled on the PPE bridge.
+	 */
+	if (!nss_bridge_mgr_l2_exception_acl_enable()) {
+		nss_bridge_mgr_warn("Failed to enable ACL\n");
+	}
+#endif
+#if defined (NSS_BRIDGE_MGR_OVS_ENABLE)
+	nss_bridge_mgr_ovs_init();
+#endif
+	return 0;
+}
+
+/*
+ * nss_bridge_mgr_exit_module()
+ *	bridge_mgr module exit function
+ */
+void __exit nss_bridge_mgr_exit_module(void)
+{
+	unregister_netdevice_notifier(&nss_bridge_mgr_netdevice_nb);
+	nss_bridge_mgr_info("Module unloaded\n");
+#if defined(NSS_BRIDGE_MGR_PPE_SUPPORT)
+	br_fdb_update_unregister_notify(&nss_bridge_mgr_fdb_update_notifier);
+
+	if (br_mgr_ctx.nss_bridge_mgr_header) {
+		unregister_sysctl_table(br_mgr_ctx.nss_bridge_mgr_header);
+	}
+
+	/*
+	 * Disable the PPE L2 exceptions which were enabled during module init for PPE virtual ports.
+	 */
+	nss_bridge_mgr_l2_exception_acl_disable();
+
+#endif
+#if defined (NSS_BRIDGE_MGR_OVS_ENABLE)
+	nss_bridge_mgr_ovs_exit();
+#endif
+}
+
+module_init(nss_bridge_mgr_init_module);
+module_exit(nss_bridge_mgr_exit_module);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("NSS bridge manager");
+
+module_param(ovs_enabled, bool, 0644);
+MODULE_PARM_DESC(ovs_enabled, "OVS bridge is enabled");
diff --git a/qca-nss-clients/bridge/nss_bridge_mgr_ovs.c b/qca-nss-clients/bridge/nss_bridge_mgr_ovs.c
new file mode 100644
index 0000000..b1468c7
--- /dev/null
+++ b/qca-nss-clients/bridge/nss_bridge_mgr_ovs.c
@@ -0,0 +1,200 @@
+/*
+ **************************************************************************
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all copies.
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ **************************************************************************
+ */
+
+/*
+ * nss_bridge_mgr_ovs.c
+ *	Handle OVS bridge notifications.
+ */
+#include <linux/netdevice.h>
+#include <linux/notifier.h>
+#include <ovsmgr.h>
+#include <nss_vlan_mgr.h>
+
+#include "nss_bridge_mgr_priv.h"
+
+/*
+ * nss_bridge_mgr_ovs_handle_port_event()
+ *	Handle OVS bridge port events
+ */
+static int nss_bridge_mgr_ovs_handle_port_event(struct ovsmgr_notifiers_info *ovs_info, unsigned long event)
+{
+	struct ovsmgr_dp_port_info *port;
+	struct nss_bridge_pvt *b_pvt;
+	struct net_device *master_dev, *dev;
+	int err;
+
+	port = ovs_info->port;
+	if (!port || !port->master || !port->dev) {
+		nss_bridge_mgr_warn("%px: Invalid ovs_info\n", ovs_info);
+		return -EINVAL;
+	}
+
+	master_dev = port->master;
+	dev = port->dev;
+
+	/*
+	 * Check if upper_dev is a known bridge.
+	 */
+	b_pvt = nss_bridge_mgr_find_instance(master_dev);
+	if (!b_pvt) {
+		nss_bridge_mgr_warn("%px: Couldn't find bridge instance for master: %s\n", port, master_dev->name);
+		return -ENOENT;
+	}
+
+	/*
+	 * add port to the bridge.
+	 */
+	if (event == OVSMGR_DP_PORT_ADD) {
+		nss_bridge_mgr_trace("%px: Interface %s joining bridge %s\n", b_pvt, dev->name, master_dev->name);
+
+		err = nss_bridge_mgr_join_bridge(dev, b_pvt);
+		if (err) {
+			nss_bridge_mgr_warn("%px: Interface %s failed to join bridge %s\n", b_pvt, dev->name, master_dev->name);
+			return err;
+		}
+
+		return 0;
+	}
+
+	/*
+	 * delete port from bridge.
+	 */
+	nss_bridge_mgr_trace("%px: Interface %s leaving bridge %s\n", b_pvt, dev->name, master_dev->name);
+
+	err = nss_bridge_mgr_leave_bridge(dev, b_pvt);
+	if (err) {
+		nss_bridge_mgr_warn("%px: Interface %s failed to leave bridge %s\n", b_pvt, dev->name, master_dev->name);
+		return err;
+	}
+
+	return 0;
+}
+
+/*
+ * nss_bridge_mgr_ovs_handle_vlan_event()
+ *	Handle VLAN events on an OVS bridge port
+ */
+static void nss_bridge_mgr_ovs_handle_vlan_event(struct ovsmgr_notifiers_info *ovs_info, unsigned long event)
+{
+	struct ovsmgr_dp_port_vlan_info *vlan;
+	struct nss_bridge_pvt *b_pvt;
+	struct net_device *master_dev, *dev;
+
+	vlan = ovs_info->vlan;
+	if (!vlan || !vlan->master || !vlan->dev) {
+		nss_bridge_mgr_warn("%px: Invalid ovs_info\n", ovs_info);
+		return;
+	}
+
+	master_dev = vlan->master;
+	dev = vlan->dev;
+
+	/*
+	 * Check if upper_dev is a known bridge.
+	 */
+	b_pvt = nss_bridge_mgr_find_instance(master_dev);
+	if (!b_pvt) {
+		nss_bridge_mgr_warn("%px: Couldn't find bridge instance for master: %s\n", vlan, master_dev->name);
+		return;
+	}
+
+	if (event == OVSMGR_DP_VLAN_ADD) {
+		/*
+		 * add VLAN in bridge.
+		 */
+		nss_bridge_mgr_trace("%px: VLAN = %d, add on port %s, bridge %s\n",
+				b_pvt, vlan->vh.h_vlan_TCI, dev->name, master_dev->name);
+
+		nss_vlan_mgr_add_vlan_rule(dev, b_pvt->vsi, vlan->vh.h_vlan_TCI);
+		return;
+	}
+
+	/*
+	 * delete VLAN from bridge.
+	 */
+	nss_bridge_mgr_trace("%px: VLAN = %d, delete on port %s, bridge %s\n",
+					b_pvt, vlan->vh.h_vlan_TCI, dev->name, master_dev->name);
+	nss_vlan_mgr_del_vlan_rule(dev, b_pvt->vsi, vlan->vh.h_vlan_TCI);
+}
+
+/*
+ * nss_bridge_mgr_ovs_notifier_callback()
+ *	OVS manager notifier callback, invoked on OVS bridge, port and VLAN events
+ */
+static int nss_bridge_mgr_ovs_notifier_callback(struct notifier_block *nb, unsigned long event, void *data)
+{
+	struct ovsmgr_notifiers_info *ovs_info = (struct ovsmgr_notifiers_info *)data;
+
+	nss_bridge_mgr_info("OVS notifier event: %lu\n", event);
+
+	switch (event) {
+	case OVSMGR_DP_BR_ADD:
+		nss_bridge_mgr_register_br(ovs_info->dev);
+		break;
+	case OVSMGR_DP_BR_DEL:
+		nss_bridge_mgr_unregister_br(ovs_info->dev);
+		break;
+	case OVSMGR_DP_PORT_ADD:
+	case OVSMGR_DP_PORT_DEL:
+		nss_bridge_mgr_ovs_handle_port_event(ovs_info, event);
+		break;
+	case OVSMGR_DP_VLAN_ADD:
+	case OVSMGR_DP_VLAN_DEL:
+		nss_bridge_mgr_ovs_handle_vlan_event(ovs_info, event);
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
+/*
+ * struct notifier_block nss_bridge_mgr_ovs_notifier
+ *	Registration for OVS events
+ */
+static struct notifier_block ovs_notifier __read_mostly = {
+	.notifier_call = nss_bridge_mgr_ovs_notifier_callback,
+};
+
+/*
+ * nss_bridge_mgr_is_ovs_port()
+ *	Return true if dev is an OVS port.
+ */
+int nss_bridge_mgr_is_ovs_port(struct net_device *dev)
+{
+	if (dev->priv_flags & IFF_OVS_DATAPATH) {
+		return true;
+	}
+
+	return false;
+}
+
+/*
+ * nss_bridge_mgr_ovs_exit()
+ *	Cleanup OVS bridge handlers.
+ */
+void nss_bridge_mgr_ovs_exit(void)
+{
+	ovsmgr_notifier_unregister(&ovs_notifier);
+}
+
+/*
+ * nss_bridge_mgr_ovs_init()
+ *	Initialize OVS bridge handlers.
+ */
+void nss_bridge_mgr_ovs_init(void)
+{
+	ovsmgr_notifier_register(&ovs_notifier);
+}
diff --git a/qca-nss-clients/bridge/nss_bridge_mgr_priv.h b/qca-nss-clients/bridge/nss_bridge_mgr_priv.h
new file mode 100644
index 0000000..2ca505e
--- /dev/null
+++ b/qca-nss-clients/bridge/nss_bridge_mgr_priv.h
@@ -0,0 +1,131 @@
+/*
+ **************************************************************************
+ * Copyright (c) 2020, The Linux Foundation. All rights reserved.
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all copies.
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ **************************************************************************
+ */
+
+#ifndef _NSS_BRIDGE_MGR_PRIV_H_
+#define _NSS_BRIDGE_MGR_PRIV_H_
+
+#if (NSS_BRIDGE_MGR_DEBUG_LEVEL < 1)
+#define nss_bridge_mgr_assert(fmt, args...)
+#else
+#define nss_bridge_mgr_assert(c) BUG_ON(!(c))
+#endif /* NSS_BRIDGE_MGR_DEBUG_LEVEL */
+
+/*
+ * Compile messages for dynamic enable/disable
+ */
+#define nss_bridge_mgr_always(s, ...) pr_alert(s, ##__VA_ARGS__)
+
+#if defined(CONFIG_DYNAMIC_DEBUG)
+#define nss_bridge_mgr_warn(s, ...) \
+		pr_debug("%s[%d]:" s, __func__, __LINE__, ##__VA_ARGS__)
+#define nss_bridge_mgr_info(s, ...) \
+		pr_debug("%s[%d]:" s, __func__, __LINE__, ##__VA_ARGS__)
+#define nss_bridge_mgr_trace(s, ...) \
+		pr_debug("%s[%d]:" s, __func__, __LINE__, ##__VA_ARGS__)
+#else /* CONFIG_DYNAMIC_DEBUG */
+/*
+ * Statically compile messages at different levels
+ */
+#if (NSS_BRIDGE_MGR_DEBUG_LEVEL < 2)
+#define nss_bridge_mgr_warn(s, ...)
+#else
+#define nss_bridge_mgr_warn(s, ...) \
+		pr_warn("%s[%d]:" s, __func__, __LINE__, ##__VA_ARGS__)
+#endif
+
+#if (NSS_BRIDGE_MGR_DEBUG_LEVEL < 3)
+#define nss_bridge_mgr_info(s, ...)
+#else
+#define nss_bridge_mgr_info(s, ...) \
+		pr_notice("%s[%d]:" s, __func__, __LINE__, ##__VA_ARGS__)
+#endif
+
+#if (NSS_BRIDGE_MGR_DEBUG_LEVEL < 4)
+#define nss_bridge_mgr_trace(s, ...)
+#else
+#define nss_bridge_mgr_trace(s, ...) \
+		pr_info("%s[%d]:" s, __func__, __LINE__, ##__VA_ARGS__)
+#endif
+#endif /* CONFIG_DYNAMIC_DEBUG */
+
+/*
+ * nss interface check
+ */
+#define NSS_BRIDGE_MGR_PHY_PORT_MIN 1
+#define NSS_BRIDGE_MGR_PHY_PORT_MAX 6
+#define NSS_BRIDGE_MGR_IF_IS_TYPE_PHYSICAL(if_num) \
+	(((if_num) >= NSS_BRIDGE_MGR_PHY_PORT_MIN) && \
+	((if_num) <= NSS_BRIDGE_MGR_PHY_PORT_MAX))
+
+#if defined(NSS_BRIDGE_MGR_PPE_SUPPORT)
+#define NSS_BRIDGE_MGR_SWITCH_ID	0
+#define NSS_BRIDGE_MGR_SPANNING_TREE_ID	0
+#define NSS_BRIDGE_MGR_DISABLE_PPE_EXCEPTION	0
+#define NSS_BRIDGE_MGR_ENABLE_PPE_EXCEPTION	1
+
+#define NSS_BRIDGE_MGR_ACL_DEV_ID 0
+#define NSS_BRIDGE_MGR_ACL_LIST_ID 61
+#define NSS_BRIDGE_MGR_ACL_LIST_PRIORITY 0
+#define NSS_BRIDGE_MGR_ACL_RULE_NR 1
+#define NSS_BRIDGE_MGR_ACL_FRAG_RULE_ID 0
+#define NSS_BRIDGE_MGR_ACL_FIN_RULE_ID 1
+#define NSS_BRIDGE_MGR_ACL_SYN_RULE_ID 2
+#define NSS_BRIDGE_MGR_ACL_RST_RULE_ID 3
+
+#endif
+
+/*
+ * bridge manager context structure
+ */
+struct nss_bridge_mgr_context {
+	struct list_head list;		/* List of bridge instance */
+	spinlock_t lock;		/* Lock to protect bridge instance */
+#if defined(NSS_BRIDGE_MGR_PPE_SUPPORT)
+	int32_t wan_if_num;		/* WAN interface number */
+	char wan_ifname[IFNAMSIZ];	/* WAN interface name */
+	struct ctl_table_header *nss_bridge_mgr_header;	/* bridge sysctl */
+#endif
+};
+
+/*
+ * bridge manager private structure
+ */
+struct nss_bridge_pvt {
+	struct list_head list;			/* List of bridge instance */
+	struct net_device *dev;			/* Bridge netdevice */
+	uint32_t ifnum;				/* Dynamic interface for bridge */
+#if defined(NSS_BRIDGE_MGR_PPE_SUPPORT)
+	uint32_t vsi;				/* VSI set for bridge */
+	uint32_t port_vsi[NSS_BRIDGE_MGR_PHY_PORT_MAX];	/* port VSI set for physical interfaces	*/
+	uint32_t lag_ports[NSS_BRIDGE_MGR_PHY_PORT_MAX];	/* List of slave ports in LAG */
+	int bond_slave_num;			/* Total number of bond devices added into
+						   bridge device */
+	bool wan_if_enabled;			/* Is WAN interface enabled? */
+	int32_t wan_if_num;			/* WAN interface number, if enabled */
+#endif
+	uint32_t mtu;				/* MTU for bridge */
+	uint8_t dev_addr[ETH_ALEN];		/* MAC address for bridge */
+};
+
+int nss_bridge_mgr_register_br(struct net_device *dev);
+int nss_bridge_mgr_unregister_br(struct net_device *dev);
+struct nss_bridge_pvt *nss_bridge_mgr_find_instance(struct net_device *dev);
+int nss_bridge_mgr_join_bridge(struct net_device *dev, struct nss_bridge_pvt *br);
+int nss_bridge_mgr_leave_bridge(struct net_device *dev, struct nss_bridge_pvt *br);
+void nss_bridge_mgr_ovs_init(void);
+void nss_bridge_mgr_ovs_exit(void);
+
+#endif
diff --git a/qca-nss-clients/build.sh b/qca-nss-clients/build.sh
new file mode 100755
index 0000000..88fb760
--- /dev/null
+++ b/qca-nss-clients/build.sh
@@ -0,0 +1,116 @@
+#!/bin/bash
+
+set -e
+set -o errtrace
+trap 'echo Fatal error: script $0 aborting at line $LINENO, command \"$BASH_COMMAND\" returned $?; exit 1' ERR
+
+PRODUCT_LIST="sirocco"
+
+NUM_JOBS=$(grep -c processor /proc/cpuinfo)
+
+MODULE_NAME="qca-nss-clients"
+###################################################
+# Setup build toolchain
+###################################################
+sdk_top_dir=$(readlink -e $(dirname $0)/..)
+TOP_DIR=$(readlink -e ${sdk_top_dir}/..)
+
+# Export ARCH and other build-related env vars.
+source ${TOP_DIR}/sdk/build_scripts/setup_env.sh
+
+kernel_path=$(readlink -e ${sdk_top_dir}/../kernel)
+qca_ssdk_include_path=$(readlink -e ${sdk_top_dir}/qca-ssdk/include)
+qca_ssdk_common_path=$(readlink -e ${sdk_top_dir}/qca-ssdk/include/common)
+qca_ssdk_sal_path=$(readlink -e ${sdk_top_dir}/qca-ssdk/include/sal/os)
+qca_ssdk_sal_linux_path=$(readlink -e ${sdk_top_dir}/qca-ssdk/include/sal/os/linux)
+qca_nss_drv_export_path=$(readlink -e ${sdk_top_dir}/qca-nss-drv/exports)
+qca_nss_drv_dependency=$(readlink -e ${sdk_top_dir}/qca-nss-drv)
+soc_type=ipq50xx
+
+export SoC=${soc_type}
+
+dtlsmgr_ver="v2.0"
+ipsecmgr_ver="v2.0"
+
+extra_cflags="-I${qca_nss_drv_export_path} -I${qca_ssdk_include_path} -I${qca_ssdk_common_path} -I${qca_ssdk_sal_path} -I${qca_ssdk_sal_linux_path}"
+
+make_modules="wifi-meshmgr"
+
+make_opts=""
+for module in ${make_modules}; do
+  make_opts="${make_opts} ${module}=y"
+done
+
+##################################################
+# Build Kernel Module
+##################################################
+function BuildKModule() {
+    # make kernel module
+    echo "Build ${MODULE_NAME}"
+    # Quote EXTRA_CFLAGS: it contains spaces and would otherwise be word-split by the shell.
+    ${CROSS_MAKE} -C ${kernel_path} ${make_opts} M=${sdk_top_dir}/${MODULE_NAME} SoC=${soc_type} EXTRA_CFLAGS="${extra_cflags}" DTLSMGR_DIR=${dtlsmgr_ver} IPSECMGR_DIR=${ipsecmgr_ver} KBUILD_EXTRA_SYMBOLS=${qca_nss_drv_dependency}/Module.symvers V=1
+}
+
+##################################################
+# Build & Install
+##################################################
+function BuildAndInstall() {
+    local product=$1
+    local eureka_src_path=$(readlink -e $2)
+    if [ -z "${eureka_src_path}" ]; then
+      echo "eureka_src_path doesn't exist"
+      exit 1
+    fi
+
+    # clean previous build
+    find . -name "*.o" -delete
+    find . -name "*.cmd" -delete
+    find . -name "*.ko" -delete
+    find . -name "*.mod.*" -delete
+    find . -name "*.mod" -delete
+    find . -name Module.symvers -delete
+
+    # build module
+    BuildKModule
+
+    # install module
+    echo "Install ${MODULE_NAME} modules: ${make_modules}"
+    local module_target_dir="$(GetModulePath ${eureka_src_path} ${product})"
+    mkdir -p ${module_target_dir}
+    cp -f wifi_meshmgr/qca-nss-wifi-meshmgr.ko ${module_target_dir}/.
+}
+
+function Usage() {
+    cat << EOF
+Usage:
+    $0 <product> <eureka_src_path>
+    Valid products: ${PRODUCT_LIST}
+EOF
+}
+
+function IsProductValid() {
+  local product
+  local ret=1
+
+  for product in ${PRODUCT_LIST}; do
+    if [ "${product}" == "$1" ]; then
+      ret=0
+      break
+    fi
+  done
+
+  return ${ret}
+}
+
+#########################
+####### Main Entry ######
+#########################
+if (( $# < 2 )); then
+  Usage
+else
+  if IsProductValid $1; then
+    BuildAndInstall $1 $2
+  else
+    echo "$1 is a invalid product"
+    Usage
+  fi
+fi
diff --git a/qca-nss-clients/capwapmgr/Makefile b/qca-nss-clients/capwapmgr/Makefile
new file mode 100644
index 0000000..0e1450c
--- /dev/null
+++ b/qca-nss-clients/capwapmgr/Makefile
@@ -0,0 +1,17 @@
+ccflags-y := -I$(obj)/../exports -I$(obj)/..  -I$(obj)/nss_hal/include
+ccflags-y += -DNSS_CLIENT_BUILD_ID="$(BUILD_ID)"
+
+#
+# If you want only one netdev for all the CAPWAP tunnels, then
+# set NSS_CAPWAPMGR_ONE_NETDEV.
+#     ccflags-y += -DNSS_CAPWAPMGR_ONE_NETDEV
+#
+# If you want one netdevice per-CAPWAP tunnel, then don't set
+# NSS_CAPWAPMGR_ONE_NETDEV.
+#
+obj-m += qca-nss-capwapmgr.o
+qca-nss-capwapmgr-objs := nss_capwapmgr.o
+ccflags-y += -DNSS_CAPWAPMGR_DEBUG_LEVEL=6
+
+ccflags-y += $(NSS_CCFLAGS) -DNSS_DEBUG_LEVEL=0 -DNSS_PKT_STATS_ENABLED=0
+ccflags-y += -Wall -Werror
diff --git a/qca-nss-clients/capwapmgr/nss_capwapmgr.c b/qca-nss-clients/capwapmgr/nss_capwapmgr.c
new file mode 100644
index 0000000..62068b6
--- /dev/null
+++ b/qca-nss-clients/capwapmgr/nss_capwapmgr.c
@@ -0,0 +1,3484 @@
+/*
+ **************************************************************************
+ * Copyright (c) 2014-2021, The Linux Foundation. All rights reserved.
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all copies.
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ **************************************************************************
+ */
+
+/*
+ * nss_capwapmgr.c
+ *	NSS to HLOS CAPWAP manager
+ */
+#include <linux/types.h>
+#include <linux/ip.h>
+#include <linux/of.h>
+#include <linux/tcp.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <net/ipv6.h>
+#include <linux/version.h>
+
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3,9,0))
+#include <net/ipip.h>
+#else
+#include <net/ip_tunnels.h>
+#endif
+
+#include <linux/if_arp.h>
+#include <linux/etherdevice.h>
+#include <nss_api_if.h>
+#include <linux/in.h>
+#include <nss_cmn.h>
+#include <nss_capwap.h>
+#include <nss_capwapmgr.h>
+#include <nss_capwap_user.h>
+#include <fal/fal_qos.h>
+#include <fal/fal_acl.h>
+
+#define NSS_CAPWAPMGR_NETDEV_NAME	"nsscapwap"
+
+/*
+ * This file is responsible for interacting with qca-nss-drv's
+ * CAPWAP API to manage CAPWAP tunnels.
+ *
+ * This driver also exposes a few APIs which can be used by
+ * another module to perform operations on CAPWAP tunnels. We create
+ * one netdevice for all the CAPWAP tunnels at the module's init
+ * time if NSS_CAPWAPMGR_ONE_NETDEV is set in the Makefile.
+ *
+ * If your requirement is to create one netdevice per CAPWAP tunnel,
+ * then the netdevice needs to be created before the CAPWAP tunnel.
+ * Netdevices are created using the nss_capwapmgr_netdev_create() API.
+ */
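+
+/*
+ * A minimal usage sketch (illustrative only): when NSS_CAPWAPMGR_ONE_NETDEV
+ * is not set, a caller is expected to create the netdevice first and destroy
+ * it only after all of its CAPWAP tunnels have been destroyed:
+ *
+ *	struct net_device *dev;
+ *
+ *	dev = nss_capwapmgr_netdev_create();
+ *	if (!dev)
+ *		return -ENOMEM;
+ *
+ *	... create and use CAPWAP tunnels on dev ...
+ *
+ *	nss_capwapmgr_netdev_destroy(dev);
+ */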
+
+/*
+ * NSS capwap mgr macros
+ */
+#define NSS_CAPWAPMGR_NORMAL_FRAME_MTU 1500
+
+/*
+ * Ethernet types.
+ */
+#define NSS_CAPWAPMGR_ETH_TYPE_MASK 0xFFFF
+#define NSS_CAPWAPMGR_ETH_TYPE_TRUSTSEC 0x8909
+#define NSS_CAPWAPMGR_ETH_TYPE_IPV4 ETH_P_IP
+#define NSS_CAPWAPMGR_ETH_TYPE_IPV6 ETH_P_IPV6
+#define NSS_CAPWAPMGR_DSCP_MAX 64
+
+/*
+ * ACL specific parameters.
+ */
+#define NSS_CAPWAPMGR_ETH_HDR_OFFSET 6
+#define NSS_CAPWAPMGR_IPV4_OFFSET 8
+#define NSS_CAPWAPMGR_DSCP_MASK_IPV4_SHIFT 2
+#define NSS_CAPWAPMGR_DSCP_MASK_IPV6_SHIFT 6
+#define NSS_CAPWAPMGR_DEV_ID 0
+#define NSS_CAPWAPMGR_GROUP_ID 0
+#define NSS_CAPWAPMGR_RULE_NR 1
+
+/*
+ * ACL rule bind bitmap for all physical ports (1 through 6)
+ */
+#define NSS_CAPWAPMGR_BIND_BITMAP 0x7E
+
+/*
+ * We need 4 ACL rules - 2 rules for each v4 and v6 classification.
+ */
+#define NSS_CAPWAPMGR_ACL_RULES_PER_LIST 4
+
+/*
+ * We currently have list-id 60 reserved for this purpose.
+ * TODO: Find a better approach to reserve list-id.
+ */
+#define NSS_CAPWAPMGR_ACL_LIST_START 60
+#define NSS_CAPWAPMGR_ACL_LIST_CNT 1
+
+#if (NSS_CAPWAPMGR_DEBUG_LEVEL < 1)
+#define nss_capwapmgr_assert(fmt, args...)
+#else
+#define nss_capwapmgr_assert(c) BUG_ON(!(c))
+#endif /* NSS_CAPWAPMGR_DEBUG_LEVEL */
+
+/*
+ * Compile messages for dynamic enable/disable
+ */
+#if defined(CONFIG_DYNAMIC_DEBUG)
+#define nss_capwapmgr_warn(s, ...) pr_debug("%s[%d]:" s, __FUNCTION__, __LINE__, ##__VA_ARGS__)
+#define nss_capwapmgr_info(s, ...) pr_debug("%s[%d]:" s, __FUNCTION__, __LINE__, ##__VA_ARGS__)
+#define nss_capwapmgr_trace(s, ...) pr_debug("%s[%d]:" s, __FUNCTION__, __LINE__, ##__VA_ARGS__)
+#else /* CONFIG_DYNAMIC_DEBUG */
+/*
+ * Statically compile messages at different levels
+ */
+#if (NSS_CAPWAPMGR_DEBUG_LEVEL < 2)
+#define nss_capwapmgr_warn(s, ...)
+#else
+#define nss_capwapmgr_warn(s, ...) pr_warn("%s[%d]:" s, __FUNCTION__, __LINE__, ##__VA_ARGS__)
+#endif
+
+#if (NSS_CAPWAPMGR_DEBUG_LEVEL < 3)
+#define nss_capwapmgr_info(s, ...)
+#else
+#define nss_capwapmgr_info(s, ...)   pr_notice("%s[%d]:" s, __FUNCTION__, __LINE__, ##__VA_ARGS__)
+#endif
+
+#if (NSS_CAPWAPMGR_DEBUG_LEVEL < 4)
+#define nss_capwapmgr_trace(s, ...)
+#else
+#define nss_capwapmgr_trace(s, ...)  pr_info("%s[%d]:" s, __FUNCTION__, __LINE__, ##__VA_ARGS__)
+#endif
+#endif /* CONFIG_DYNAMIC_DEBUG */
+
+/*
+ * nss_capwapmgr_ip_response
+ *	Response structure for IPv4 and IPv6 messages
+ */
+static struct nss_capwapmgr_ip_response {
+	struct semaphore sem;
+	wait_queue_head_t wq;
+	enum nss_cmn_response response;
+	bool cond;
+} ip_response;
+
+/*
+ * nss_capwapmgr_acl
+ *	Object containing rule related info.
+ */
+struct nss_capwapmgr_acl {
+	bool in_use;			/* Set when rule is in use. */
+	uint8_t uid;			/* Unique ID for this rule object. */
+	uint8_t list_id;		/* List on which this rule resides. */
+	uint8_t rule_id;		/* Rule-id of this rule. */
+	uint8_t dscp_value;		/* DSCP value */
+	uint8_t dscp_mask;		/* DSCP mask */
+};
+
+/*
+ * nss_capwapmgr_acl_list
+ */
+struct nss_capwapmgr_acl_list {
+	struct nss_capwapmgr_acl rule[NSS_CAPWAPMGR_ACL_RULES_PER_LIST];
+					/* Rules on this ACL list. */
+};
+
+/*
+ * nss_capwapmgr_global
+ *	Global structure for capwapmgr.
+ */
+static struct nss_capwapmgr_global {
+	uint32_t count;				/* Counter for driver queue selection. */
+	struct nss_capwap_tunnel_stats tunneld;	/* Stats for tunnels that no longer exist. */
+	struct nss_capwapmgr_acl_list acl_list[NSS_CAPWAPMGR_ACL_LIST_CNT];
+						/* ACL rule lists for DSCP classification. */
+} global;
+
+static void nss_capwapmgr_receive_pkt(struct net_device *dev, struct sk_buff *skb, struct napi_struct *napi);
+
+#if defined(NSS_CAPWAPMGR_ONE_NETDEV)
+/*
+ * Used when one netdev is shared by all the tunnels. If you don't want
+ * one netdev for all the tunnels, then a netdev must be created using
+ * nss_capwapmgr_netdev_create() before every tunnel create operation.
+ */
+static struct net_device *nss_capwapmgr_ndev = NULL;
+#endif
+
+/*
+ * nss_capwapmgr_open()
+ *	Netdev's open call.
+ */
+static int nss_capwapmgr_open(struct net_device *dev)
+{
+	netif_start_queue(dev);
+	return 0;
+}
+
+/*
+ * nss_capwapmgr_close()
+ *	Netdev's close call.
+ */
+static int nss_capwapmgr_close(struct net_device *dev)
+{
+	netif_stop_queue(dev);
+	return 0;
+}
+
+/*
+ * nss_capwapmgr_decongestion_callback()
+ *	Wakeup netif queue if we were stopped by start_xmit
+ */
+static void nss_capwapmgr_decongestion_callback(void *arg)
+{
+	struct net_device *dev = arg;
+
+	if (netif_queue_stopped(dev)) {
+		netif_wake_queue(dev);
+	}
+}
+
+/*
+ * nss_capwapmgr_start_xmit()
+ *	Transmit's skb to NSS FW over CAPWAP if_num_inner.
+ *
+ * Please make sure to leave headroom of NSS_CAPWAP_HEADROOM with every
+ * packet so that NSS can encap eth,vlan,ip,udp,capwap headers.
+ * Also, skb->len must include size of metaheader. Essentially skb->len is
+ * size of CAPWAP Payload (including wireless info sections) and metaheader.
+ */
+static netdev_tx_t nss_capwapmgr_start_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct net_device_stats *stats = &dev->stats;
+	struct nss_capwapmgr_priv *priv;
+	struct nss_capwap_metaheader *pre;
+	uint32_t if_num_inner;
+	nss_tx_status_t status;
+
+	priv = netdev_priv(dev);
+	pre = (struct nss_capwap_metaheader *)skb->data;
+
+	if (unlikely(pre->tunnel_id >= NSS_CAPWAPMGR_MAX_TUNNELS)) {
+		nss_capwapmgr_warn("%px: (CAPWAP packet) tunnel-id invalid: %d\n", dev, pre->tunnel_id);
+		kfree_skb(skb);
+		stats->tx_dropped++;
+		return NETDEV_TX_OK;
+	}
+
+	if_num_inner = priv->tunnel[pre->tunnel_id].if_num_inner;
+	if (unlikely(if_num_inner == -1)) {
+		nss_capwapmgr_warn("%px: (CAPWAP packet) if_num_inner is not set for tunnel-id %d\n", dev,
+				pre->tunnel_id);
+		kfree_skb(skb);
+		stats->tx_dropped++;
+		return NETDEV_TX_OK;
+	}
+
+	/*
+	 * We use the lowest bit in the inner flow_id to determine which Tx ring
+	 * to use (drv uses queue mapping to select Tx ring).
+	 *
+	 * This ring distribution will in turn get used in NSS firmware
+	 * for better thread distribution of encap operation.
+	 */
+	skb_set_queue_mapping(skb, pre->flow_id & 0x1);
+
+	status = nss_capwap_tx_buf(priv->nss_ctx, skb, if_num_inner);
+	if (unlikely(status != NSS_TX_SUCCESS)) {
+		if (status == NSS_TX_FAILURE_QUEUE) {
+			nss_capwapmgr_warn("%px: netdev :%px queue is full", dev, dev);
+			if (!netif_queue_stopped(dev)) {
+				netif_stop_queue(dev);
+			}
+		}
+
+		return NETDEV_TX_BUSY;
+	}
+
+	return NETDEV_TX_OK;
+}
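+
+/*
+ * A hedged sketch of how a caller prepares an skb for the xmit path above
+ * (illustrative only; payload_len, tid and flow are hypothetical). The
+ * metaheader must sit at skb->data and skb->len must include it:
+ *
+ *	struct nss_capwap_metaheader *pre;
+ *	struct sk_buff *skb;
+ *
+ *	skb = alloc_skb(NSS_CAPWAP_HEADROOM + sizeof(*pre) + payload_len, GFP_ATOMIC);
+ *	if (!skb)
+ *		return -ENOMEM;
+ *
+ *	skb_reserve(skb, NSS_CAPWAP_HEADROOM);
+ *	pre = (struct nss_capwap_metaheader *)skb_put(skb, sizeof(*pre) + payload_len);
+ *	pre->tunnel_id = tid;
+ *	pre->flow_id = flow;	(the lowest bit selects the Tx ring)
+ *
+ *	Fill in the CAPWAP payload after the metaheader, then transmit the
+ *	skb on this netdev, e.g. via dev_queue_xmit(skb).
+ */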
+
+/*
+ * nss_capwapmgr_fill_up_stats()
+ *	Fills up stats in netdev's stats.
+ */
+static void nss_capwapmgr_fill_up_stats(struct rtnl_link_stats64 *stats, struct nss_capwap_tunnel_stats *tstats)
+{
+	stats->rx_packets += tstats->pnode_stats.rx_packets;
+	stats->rx_dropped += tstats->pnode_stats.rx_dropped;
+
+	/* rx_fifo_errors will appear as rx overruns in ifconfig */
+	stats->rx_fifo_errors += (tstats->rx_n2h_drops + tstats->rx_n2h_queue_full_drops);
+	stats->rx_errors += (tstats->rx_mem_failure_drops + tstats->rx_oversize_drops + tstats->rx_frag_timeout_drops);
+	stats->rx_bytes += tstats->pnode_stats.rx_bytes;
+
+	/* tx_fifo_errors  will appear as tx overruns in ifconfig */
+	stats->tx_fifo_errors += tstats->tx_queue_full_drops;
+	stats->tx_errors += tstats->tx_mem_failure_drops;
+	stats->tx_bytes += tstats->pnode_stats.tx_bytes;
+
+	stats->tx_dropped += (tstats->tx_dropped_sg_ref + tstats->tx_dropped_ver_mis + tstats->tx_dropped_hroom
+			 + tstats->tx_dropped_dtls + tstats->tx_dropped_nwireless);
+	stats->tx_packets += tstats->pnode_stats.tx_packets;
+}
+
+/*
+ * nss_capwapmgr_get_tunnel_stats()
+ *	Netdev get stats function to get tunnel stats
+ */
+static struct rtnl_link_stats64 *nss_capwapmgr_get_tunnel_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+	struct nss_capwap_tunnel_stats tstats;
+	int i;
+
+	if (!stats) {
+		nss_capwapmgr_warn("%px: invalid rtnl structure\n", dev);
+		return stats;
+	}
+
+	/*
+	 * The netdev layer increments rx_dropped because we don't supply an
+	 * IP header. Reset it, as it is of no use to us.
+	 */
+	atomic_long_set(&dev->rx_dropped, 0);
+
+	memset(stats, 0, sizeof (struct rtnl_link_stats64));
+	nss_capwapmgr_fill_up_stats(stats, &global.tunneld);
+
+	for (i = NSS_DYNAMIC_IF_START; i < (NSS_DYNAMIC_IF_START + NSS_MAX_DYNAMIC_INTERFACES); i++) {
+		if (nss_capwap_get_stats(i, &tstats) == false) {
+			continue;
+		}
+
+		nss_capwapmgr_fill_up_stats(stats, &tstats);
+	}
+
+	return stats;
+}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0))
+/*
+ * nss_capwapmgr_dev_tunnel_stats()
+ *	Netdev ops function to retrieve stats for kernel version < 4.6
+ */
+static struct rtnl_link_stats64 *nss_capwapmgr_dev_tunnel_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+	return nss_capwapmgr_get_tunnel_stats(dev, stats);
+}
+#else
+/*
+ * nss_capwapmgr_dev_tunnel_stats()
+ *	Netdev ops function to retrieve stats for kernel version >= 4.6
+ */
+static void nss_capwapmgr_dev_tunnel_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+	nss_capwapmgr_get_tunnel_stats(dev, stats);
+}
+#endif
+
+/*
+ * nss_capwapmgr_netdev_ops
+ *	Netdev operations.
+ */
+static const struct net_device_ops nss_capwapmgr_netdev_ops = {
+	.ndo_open		= nss_capwapmgr_open,
+	.ndo_stop		= nss_capwapmgr_close,
+	.ndo_start_xmit		= nss_capwapmgr_start_xmit,
+	.ndo_set_mac_address	= eth_mac_addr,
+	.ndo_change_mtu		= eth_change_mtu,
+	.ndo_get_stats64	= nss_capwapmgr_dev_tunnel_stats,
+};
+
+/*
+ * nss_capwapmgr_dummpy_netdev_setup()
+ *	Netdev setup function.
+ */
+static void nss_capwapmgr_dummpy_netdev_setup(struct net_device *dev)
+{
+	dev->addr_len = ETH_ALEN;
+	dev->mtu = ETH_DATA_LEN;
+	dev->needed_headroom = NSS_CAPWAP_HEADROOM;
+	dev->needed_tailroom = 4;
+	dev->type = ARPHRD_VOID;
+	dev->ethtool_ops = NULL;
+	dev->header_ops = NULL;
+	dev->netdev_ops = &nss_capwapmgr_netdev_ops;
+
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(4, 11, 8))
+	dev->destructor = NULL;
+#else
+	dev->priv_destructor = NULL;
+#endif
+	memcpy(dev->dev_addr, "\x00\x00\x00\x00\x00\x00", dev->addr_len);
+	memset(dev->broadcast, 0xff, dev->addr_len);
+	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
+}
+
+/*
+ * nss_capwapmgr_msg_event_receive()
+ *	CAPWAP message callback for responses to commands sent to NSS FW
+ *
+ * This is the command handler for all messages; all we do is wake up
+ * the caller that sent the message to the NSS FW.
+ */
+static void nss_capwapmgr_msg_event_receive(void *app_data, struct nss_capwap_msg *nim)
+{
+	struct net_device *dev = app_data;
+	struct nss_cmn_msg *ncm = (struct nss_cmn_msg *)nim;
+	struct nss_capwapmgr_response *r;
+	struct nss_capwapmgr_priv *priv;
+	uint32_t if_num;
+
+	if (ncm->response == NSS_CMN_RESPONSE_NOTIFY) {
+		return;
+	}
+
+	/*
+	 * Since all CAPWAP messages are sync in nature we need to wake-up caller.
+	 */
+	if_num = ncm->interface - NSS_DYNAMIC_IF_START;
+	dev_hold(dev);
+	priv = netdev_priv(dev);
+	r = &priv->resp[if_num];
+
+	/*
+	 * If somebody is waiting...
+	 */
+	if (atomic_read(&r->seq) != 0) {
+		if (ncm->response != NSS_CMN_RESPONSE_ACK) {
+			r->error = ncm->error;
+		}
+
+		r->response = ncm->response;
+		atomic_dec(&r->seq);
+		wake_up(&r->wq);
+	}
+
+	dev_put(dev);
+}
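+
+/*
+ * A hedged sketch of the send-and-wait pattern this handler pairs with
+ * (illustrative only; 'msg' and the nss_capwap_tx_msg() send call are
+ * assumptions based on the response bookkeeping above):
+ *
+ *	r = &priv->resp[if_num - NSS_DYNAMIC_IF_START];
+ *	down(&r->sem);				(one command per tunnel at a time)
+ *	atomic_set(&r->seq, 1);			(mark that a caller is waiting)
+ *	nss_capwap_tx_msg(priv->nss_ctx, &msg);	(send the command to NSS FW)
+ *	wait_event_timeout(r->wq, atomic_read(&r->seq) == 0, 5 * HZ);
+ *	up(&r->sem);
+ */
+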
+/*
+ * nss_capwapmgr_ip_common_handler()
+ *	Common Callback handler for IPv4 and IPv6 messages
+ */
+static void nss_capwapmgr_ip_common_handler(struct nss_cmn_msg *ncm)
+{
+	if (ncm->response == NSS_CMN_RESPONSE_NOTIFY) {
+		return;
+	}
+
+	ip_response.response = ncm->response;
+	ip_response.cond = 0;
+	wake_up(&ip_response.wq);
+}
+
+/*
+ * nss_capwapmgr_ipv4_handler()
+ *	Callback handler for IPv4 messages
+ */
+static void nss_capwapmgr_ipv4_handler(void *app_data, struct nss_ipv4_msg *nim)
+{
+	nss_capwapmgr_ip_common_handler(&nim->cm);
+}
+
+/*
+ * nss_capwapmgr_ipv6_handler()
+ *	Callback handler for IPv6 messages
+ */
+static void nss_capwapmgr_ipv6_handler(void *app_data, struct nss_ipv6_msg *nim)
+{
+	nss_capwapmgr_ip_common_handler(&nim->cm);
+}
+
+/*
+ * nss_capwap_remap_error()
+ *	Remaps NSS FW response error to nss_capwapmgr_status_t
+ */
+static nss_capwapmgr_status_t nss_capwap_remap_error(nss_capwap_msg_response_t error)
+{
+	nss_capwapmgr_status_t status;
+
+	switch (error) {
+	case NSS_CAPWAP_ERROR_MSG_INVALID_REASSEMBLY_TIMEOUT:
+		status = NSS_CAPWAPMGR_FAILURE_INVALID_REASSEMBLY_TIMEOUT;
+		break;
+	case NSS_CAPWAP_ERROR_MSG_INVALID_PATH_MTU:
+		status = NSS_CAPWAPMGR_FAILURE_INVALID_PATH_MTU;
+		break;
+	case NSS_CAPWAP_ERROR_MSG_INVALID_MAX_FRAGMENT:
+		status = NSS_CAPWAPMGR_FAILURE_INVALID_MAX_FRAGMENT;
+		break;
+	case NSS_CAPWAP_ERROR_MSG_INVALID_BUFFER_SIZE:
+		status = NSS_CAPWAPMGR_FAILURE_INVALID_BUFFER_SIZE;
+		break;
+	case NSS_CAPWAP_ERROR_MSG_INVALID_L3_PROTO:
+		status = NSS_CAPWAPMGR_FAILURE_INVALID_L3_PROTO;
+		break;
+	case NSS_CAPWAP_ERROR_MSG_INVALID_UDP_PROTO:
+		status = NSS_CAPWAPMGR_FAILURE_INVALID_UDP_PROTO;
+		break;
+	case NSS_CAPWAP_ERROR_MSG_INVALID_VERSION:
+		status = NSS_CAPWAPMGR_FAILURE_INVALID_VERSION;
+		break;
+	case NSS_CAPWAP_ERROR_MSG_TUNNEL_DISABLED:
+		status = NSS_CAPWAPMGR_FAILURE_TUNNEL_DISABLED;
+		break;
+	case NSS_CAPWAP_ERROR_MSG_TUNNEL_ENABLED:
+		status = NSS_CAPWAPMGR_FAILURE_TUNNEL_ENABLED;
+		break;
+	case NSS_CAPWAP_ERROR_MSG_TUNNEL_NOT_CFG:
+		status = NSS_CAPWAPMGR_FAILURE_TUNNEL_NOT_CFG;
+		break;
+	case NSS_CAPWAP_ERROR_MSG_INVALID_IP_NODE:
+		status = NSS_CAPWAPMGR_FAILURE_INVALID_IP_NODE;
+		break;
+	case NSS_CAPWAP_ERROR_MSG_INVALID_TYPE_FLAG:
+		status = NSS_CAPWAPMGR_FAILURE_INVALID_TYPE_FLAG;
+		break;
+	case NSS_CAPWAP_ERROR_MSG_INVALID_DTLS_CFG:
+		status = NSS_CAPWAPMGR_FAILURE_INVALID_DTLS_CFG;
+		break;
+	default:
+		status = NSS_CAPWAPMGR_FAILURE;
+	}
+
+	return status;
+}
+
+/*
+ * nss_capwapmgr_verify_tunnel_param()
+ *	Common function to verify tunnel_id and returns pointer to tunnel.
+ *
+ * The caller of the function should hold reference to the net device before calling.
+ */
+static struct nss_capwapmgr_tunnel *nss_capwapmgr_verify_tunnel_param(struct net_device *dev, uint8_t tunnel_id)
+{
+	struct nss_capwapmgr_priv *priv;
+	struct nss_capwapmgr_tunnel *t;
+
+	if (!dev) {
+		nss_capwapmgr_warn("Invalid net_device\n");
+		return NULL;
+	}
+
+	if (tunnel_id >= NSS_CAPWAPMGR_MAX_TUNNELS) {
+		nss_capwapmgr_warn("%px: tunnel_id: %d out of range (%d)\n", dev, tunnel_id, NSS_CAPWAPMGR_MAX_TUNNELS);
+		return NULL;
+	}
+
+	priv = netdev_priv(dev);
+	t = &priv->tunnel[tunnel_id];
+	if ((t->if_num_inner == -1) || (t->if_num_outer == -1)) {
+		return NULL;
+	}
+
+	return t;
+}
+
+/*
+ * nss_capwapmgr_netdev_create()
+ *	API to create a CAPWAP netdev
+ */
+struct net_device *nss_capwapmgr_netdev_create(void)
+{
+	struct nss_capwapmgr_priv *priv;
+	struct nss_capwapmgr_response *r;
+	struct net_device *ndev;
+	int i;
+	int err;
+
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 16, 0))
+	ndev = alloc_netdev(sizeof(struct nss_capwapmgr_priv),
+					"nsscapwap%d", nss_capwapmgr_dummpy_netdev_setup);
+#else
+	ndev = alloc_netdev(sizeof(struct nss_capwapmgr_priv),
+					"nsscapwap%d", NET_NAME_ENUM, nss_capwapmgr_dummpy_netdev_setup);
+#endif
+	if (!ndev) {
+		nss_capwapmgr_warn("Error allocating netdev\n");
+		return NULL;
+	}
+
+	err = register_netdev(ndev);
+	if (err) {
+		nss_capwapmgr_warn("register_netdev() fail with error :%d\n", err);
+		free_netdev(ndev);
+		return NULL;
+	}
+
+	priv = netdev_priv(ndev);
+	priv->nss_ctx = nss_capwap_get_ctx();
+	priv->tunnel = kmalloc(sizeof(struct nss_capwapmgr_tunnel) * NSS_CAPWAPMGR_MAX_TUNNELS, GFP_ATOMIC);
+	if (!priv->tunnel) {
+		nss_capwapmgr_warn("%px: failed to allocate tunnel memory\n", ndev);
+		goto fail1;
+	}
+	memset(priv->tunnel, 0, sizeof(struct nss_capwapmgr_tunnel) * NSS_CAPWAPMGR_MAX_TUNNELS);
+	for (i = 0; i < NSS_CAPWAPMGR_MAX_TUNNELS; i++) {
+		priv->tunnel[i].if_num_inner = -1;
+		priv->tunnel[i].if_num_outer = -1;
+	}
+
+	priv->resp = kmalloc(sizeof(struct nss_capwapmgr_response) * NSS_MAX_DYNAMIC_INTERFACES, GFP_ATOMIC);
+	if (!priv->resp) {
+		nss_capwapmgr_warn("%px: failed to allocate tunnel response memory\n", ndev);
+		goto fail2;
+	}
+	for (i = 0; i < NSS_MAX_DYNAMIC_INTERFACES; i++) {
+		r = &priv->resp[i];
+		init_waitqueue_head(&r->wq);
+
+		/*
+		 * CAPWAP interface is limited to one command per-tunnel.
+		 */
+		sema_init(&r->sem, 1);
+	}
+
+	priv->if_num_to_tunnel_id = kmalloc(sizeof(uint8_t) * NSS_MAX_NET_INTERFACES, GFP_ATOMIC);
+	if (!priv->if_num_to_tunnel_id) {
+		nss_capwapmgr_warn("%px: failed to allocate if_num to tunnel_id memory\n", ndev);
+		goto fail3;
+	}
+	memset(priv->if_num_to_tunnel_id, 0, sizeof(uint8_t) * NSS_MAX_NET_INTERFACES);
+
+	if (nss_cmn_register_queue_decongestion(priv->nss_ctx, nss_capwapmgr_decongestion_callback, ndev) != NSS_CB_REGISTER_SUCCESS) {
+		nss_capwapmgr_warn("%px: failed to register decongestion callback\n", ndev);
+		goto fail4;
+	}
+
+	return ndev;
+fail4:
+	kfree(priv->if_num_to_tunnel_id);
+fail3:
+	kfree(priv->resp);
+fail2:
+	kfree(priv->tunnel);
+fail1:
+	unregister_netdev(ndev);
+	free_netdev(ndev);
+	return NULL;
+}
+EXPORT_SYMBOL(nss_capwapmgr_netdev_create);
+
+/*
+ * nss_capwapmgr_netdev_destroy()
+ *	API for destroying a netdevice.
+ *
+ * All the CAPWAP tunnels must be destroyed first before netdevice.
+ */
+nss_capwapmgr_status_t nss_capwapmgr_netdev_destroy(struct net_device *dev)
+{
+	rtnl_is_locked() ? unregister_netdevice(dev) : unregister_netdev(dev);
+	return NSS_CAPWAPMGR_SUCCESS;
+}
+EXPORT_SYMBOL(nss_capwapmgr_netdev_destroy);
+
+/*
+ * nss_capwapmgr_register_with_nss()
+ *	Internal function to register with NSS FW.
+ */
+static nss_capwapmgr_status_t nss_capwapmgr_register_with_nss(uint32_t interface_num, struct net_device *dev)
+{
+	struct nss_ctx_instance *ctx;
+
+	/* features denote the skb_types supported */
+	uint32_t features = 0;
+
+	ctx = nss_capwap_data_register(interface_num, nss_capwapmgr_receive_pkt, dev, features);
+	if (!ctx) {
+		nss_capwapmgr_warn("%px: %d: nss_capwapmgr_data_register failed\n", dev, interface_num);
+		return NSS_CAPWAPMGR_FAILURE;
+	}
+
+	return NSS_CAPWAPMGR_SUCCESS;
+}
+
+/*
+ * nss_capwapmgr_unregister_with_nss()
+ *	Internal function to unregister with NSS FW
+ */
+static void nss_capwapmgr_unregister_with_nss(uint32_t if_num)
+{
+	nss_capwapmgr_trace("%d: unregister with NSS FW\n", if_num);
+	nss_capwap_data_unregister(if_num);
+}
+
+/*
+ * nss_capwapmgr_destroy_ipv4_rule()
+ *	Destroy a given connection in the NSS
+ */
+static nss_tx_status_t nss_capwapmgr_destroy_ipv4_rule(void *ctx, struct nss_ipv4_destroy *unid)
+{
+	struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *) ctx;
+	struct nss_ipv4_msg nim;
+	struct nss_ipv4_rule_destroy_msg *nirdm;
+	nss_tx_status_t status;
+
+	nss_capwapmgr_info("%px: ctx: Destroy IPv4: %pI4h:%d, %pI4h:%d, p: %d\n", nss_ctx,
+		&unid->src_ip, ntohs(unid->src_port), &unid->dest_ip, ntohs(unid->dest_port), unid->protocol);
+
+	nss_ipv4_msg_init(&nim, NSS_IPV4_RX_INTERFACE, NSS_IPV4_TX_DESTROY_RULE_MSG,
+			sizeof(struct nss_ipv4_rule_destroy_msg), nss_capwapmgr_ipv4_handler, NULL);
+
+	nirdm = &nim.msg.rule_destroy;
+
+	nirdm->tuple.protocol = (uint8_t)unid->protocol;
+	nirdm->tuple.flow_ip = unid->src_ip;
+	nirdm->tuple.flow_ident = (uint32_t)unid->src_port;
+	nirdm->tuple.return_ip = unid->dest_ip;
+	nirdm->tuple.return_ident = (uint32_t)unid->dest_port;
+
+	down(&ip_response.sem);
+	status = nss_ipv4_tx(nss_ctx, &nim);
+	if (status != NSS_TX_SUCCESS) {
+		up(&ip_response.sem);
+		nss_capwapmgr_warn("%px: Destroy IPv4 message failed %d\n", ctx, status);
+		return status;
+	}
+
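+	/*
+	 * ip_response.cond is cleared by the registered IPv4 handler when the
+	 * response arrives; give the NSS FW up to 5 seconds to respond.
+	 */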
+	ip_response.cond = 1;
+	if (!wait_event_timeout(ip_response.wq, ip_response.cond == 0, 5 * HZ)) {
+		nss_capwapmgr_warn("%px: Destroy IPv4 command msg response timeout\n", ctx);
+		status = NSS_TX_FAILURE;
+	} else if (ip_response.response != NSS_CMN_RESPONSE_ACK) {
+		nss_capwapmgr_warn("%px: Destroy IPv4 command msg failed with response : %d\n", ctx, ip_response.response);
+		status = NSS_TX_FAILURE;
+	}
+
+	up(&ip_response.sem);
+	return status;
+}
+
+/*
+ * nss_capwapmgr_unconfigure_ipv4_rule()
+ *	Internal function to unconfigure IPv4 rule.
+ */
+static nss_tx_status_t nss_capwapmgr_unconfigure_ipv4_rule(struct nss_ipv4_destroy *destroy)
+{
+	void *ctx;
+
+	ctx = nss_ipv4_get_mgr();
+	if (!ctx) {
+		nss_capwapmgr_warn("%s: couldn't get IPv4 ctx\n", "CAPWAP");
+		return NSS_TX_FAILURE_NOT_READY;
+	}
+
+	return nss_capwapmgr_destroy_ipv4_rule(ctx, destroy);
+}
+
+/*
+ * nss_capwapmgr_unconfigure_ipv6_rule()
+ *	Internal function to unconfigure IPv6 rule.
+ */
+static nss_tx_status_t nss_capwapmgr_unconfigure_ipv6_rule(struct nss_ipv6_destroy *unid)
+{
+	struct nss_ctx_instance *nss_ctx;
+	struct nss_ipv6_msg nim;
+	struct nss_ipv6_rule_destroy_msg *nirdm;
+	nss_tx_status_t status;
+
+	nss_ctx = nss_ipv6_get_mgr();
+	if (!nss_ctx) {
+		nss_capwapmgr_warn("%s: couldn't get IPv6 ctx\n", "CAPWAP");
+		return NSS_TX_FAILURE_NOT_READY;
+	}
+
+	nss_capwapmgr_info("%px: ctx: Destroy IPv4: %x:%d, %x:%d, p: %d\n", nss_ctx,
+		unid->src_ip[0], ntohs(unid->src_port), unid->dest_ip[0], ntohs(unid->dest_port), unid->protocol);
+
+	nss_ipv6_msg_init(&nim, NSS_IPV6_RX_INTERFACE, NSS_IPV6_TX_DESTROY_RULE_MSG,
+			sizeof(struct nss_ipv6_rule_destroy_msg), nss_capwapmgr_ipv6_handler, NULL);
+
+	nirdm = &nim.msg.rule_destroy;
+
+	nirdm->tuple.protocol = (uint8_t)unid->protocol;
+	nirdm->tuple.flow_ident = (uint32_t)unid->src_port;
+	nirdm->tuple.flow_ip[0] = unid->src_ip[0];
+	nirdm->tuple.flow_ip[1] = unid->src_ip[1];
+	nirdm->tuple.flow_ip[2] = unid->src_ip[2];
+	nirdm->tuple.flow_ip[3] = unid->src_ip[3];
+
+	nirdm->tuple.return_ident = (uint32_t)unid->dest_port;
+	nirdm->tuple.return_ip[0] = unid->dest_ip[0];
+	nirdm->tuple.return_ip[1] = unid->dest_ip[1];
+	nirdm->tuple.return_ip[2] = unid->dest_ip[2];
+	nirdm->tuple.return_ip[3] = unid->dest_ip[3];
+
+	down(&ip_response.sem);
+	status = nss_ipv6_tx(nss_ctx, &nim);
+	if (status != NSS_TX_SUCCESS) {
+		up(&ip_response.sem);
+		nss_capwapmgr_warn("%px: Destroy IPv6 message failed %d\n", nss_ctx, status);
+		return status;
+	}
+
+	ip_response.cond = 1;
+	if (!wait_event_timeout(ip_response.wq, ip_response.cond == 0, 5 * HZ)) {
+		nss_capwapmgr_warn("%px: Destroy IPv6 command msg response timeout\n", nss_ctx);
+		status = NSS_TX_FAILURE;
+	} else if (ip_response.response != NSS_CMN_RESPONSE_ACK) {
+		nss_capwapmgr_warn("%px: Destroy IPv6 command msg failed with response : %d\n", nss_ctx, ip_response.response);
+		status = NSS_TX_FAILURE;
+	}
+
+	up(&ip_response.sem);
+	return status;
+}
+
+/*
+ * nss_capwapmgr_create_ipv4_rule()
+ *	Create a nss entry to accelerate the given connection
+ */
+static nss_tx_status_t nss_capwapmgr_create_ipv4_rule(void *ctx, struct nss_ipv4_create *unic, uint16_t rule_flags, uint16_t valid_flags)
+{
+	struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *) ctx;
+	struct nss_ipv4_msg nim;
+	struct nss_ipv4_rule_create_msg *nircm;
+	nss_tx_status_t status;
+
+	nss_capwapmgr_info("%px: ctx: Create IPv4: %pI4h:%d (%pI4h:%d), %pI4h:%d (%pI4h:%d), p: %d\n", nss_ctx,
+		&unic->src_ip, unic->src_port, &unic->src_ip_xlate, unic->src_port_xlate,
+		&unic->dest_ip, unic->dest_port, &unic->dest_ip_xlate, unic->dest_port_xlate,
+		unic->protocol);
+
+	memset(&nim, 0, sizeof (struct nss_ipv4_msg));
+	nss_ipv4_msg_init(&nim, NSS_IPV4_RX_INTERFACE, NSS_IPV4_TX_CREATE_RULE_MSG,
+			sizeof(struct nss_ipv4_rule_create_msg), nss_capwapmgr_ipv4_handler, NULL);
+
+	nircm = &nim.msg.rule_create;
+	nircm->valid_flags = 0;
+	nircm->rule_flags = 0;
+
+	/*
+	 * Copy over the 5 tuple details.
+	 */
+	nircm->tuple.protocol = (uint8_t)unic->protocol;
+	nircm->tuple.flow_ip = unic->src_ip;
+	nircm->tuple.flow_ident = (uint32_t)unic->src_port;
+	nircm->tuple.return_ip = unic->dest_ip;
+	nircm->tuple.return_ident = (uint32_t)unic->dest_port;
+
+	/*
+	 * Copy over the connection rules and set the CONN_VALID flag
+	 */
+	nircm->conn_rule.flow_interface_num = unic->src_interface_num;
+	nircm->conn_rule.flow_mtu = unic->from_mtu;
+	nircm->conn_rule.flow_ip_xlate = unic->src_ip_xlate;
+	nircm->conn_rule.flow_ident_xlate = (uint32_t)unic->src_port_xlate;
+	memcpy(nircm->conn_rule.flow_mac, unic->src_mac, 6);
+	nircm->conn_rule.return_interface_num = unic->dest_interface_num;
+	nircm->conn_rule.return_mtu = unic->to_mtu;
+	nircm->conn_rule.return_ip_xlate = unic->dest_ip_xlate;
+	nircm->conn_rule.return_ident_xlate = (uint32_t)unic->dest_port_xlate;
+	if (nircm->tuple.return_ip != nircm->conn_rule.return_ip_xlate ||
+		nircm->tuple.return_ident != nircm->conn_rule.return_ident_xlate) {
+		memcpy(nircm->conn_rule.return_mac, unic->dest_mac_xlate, 6);
+	} else {
+		memcpy(nircm->conn_rule.return_mac, unic->dest_mac, 6);
+	}
+
+	/*
+	 * Copy over the DSCP rule parameters
+	 */
+	if (unic->flags & NSS_IPV4_CREATE_FLAG_DSCP_MARKING) {
+		nircm->dscp_rule.flow_dscp = unic->flow_dscp;
+		nircm->dscp_rule.return_dscp = unic->return_dscp;
+		nircm->rule_flags |= NSS_IPV4_RULE_CREATE_FLAG_DSCP_MARKING;
+		nircm->valid_flags |= NSS_IPV4_RULE_CREATE_DSCP_MARKING_VALID;
+	}
+
+	nircm->valid_flags |= NSS_IPV4_RULE_CREATE_CONN_VALID;
+
+	/*
+	 * Copy over the pppoe rules and set the PPPOE_VALID flag.
+	 */
+	nircm->pppoe_rule.flow_if_exist = unic->flow_pppoe_if_exist;
+	nircm->pppoe_rule.flow_if_num = unic->flow_pppoe_if_num;
+	nircm->pppoe_rule.return_if_exist = unic->return_pppoe_if_exist;
+	nircm->pppoe_rule.return_if_num = unic->return_pppoe_if_num;
+	nircm->valid_flags |= NSS_IPV4_RULE_CREATE_PPPOE_VALID;
+
+	/*
+	 * Copy over the vlan rules and set the VLAN_VALID flag
+	 */
+	nircm->vlan_primary_rule.ingress_vlan_tag = unic->in_vlan_tag[0];
+	nircm->vlan_primary_rule.egress_vlan_tag = unic->out_vlan_tag[0];
+	nircm->vlan_secondary_rule.ingress_vlan_tag = unic->in_vlan_tag[1];
+	nircm->vlan_secondary_rule.egress_vlan_tag = unic->out_vlan_tag[1];
+	nircm->valid_flags |= NSS_IPV4_RULE_CREATE_VLAN_VALID;
+
+	/*
+	 * Copy over the qos rules and set the QOS_VALID flag
+	 */
+	if (unic->flags & NSS_IPV6_CREATE_FLAG_QOS_VALID) {
+		nircm->qos_rule.flow_qos_tag = unic->flow_qos_tag;
+		nircm->qos_rule.return_qos_tag = unic->return_qos_tag;
+		nircm->valid_flags |= NSS_IPV4_RULE_CREATE_QOS_VALID;
+	}
+
+	if (unic->flags & NSS_IPV4_CREATE_FLAG_NO_SEQ_CHECK) {
+		nircm->rule_flags |= NSS_IPV4_RULE_CREATE_FLAG_NO_SEQ_CHECK;
+	}
+
+	if (unic->flags & NSS_IPV4_CREATE_FLAG_BRIDGE_FLOW) {
+		nircm->rule_flags |= NSS_IPV4_RULE_CREATE_FLAG_BRIDGE_FLOW;
+	}
+
+	if (unic->flags & NSS_IPV4_CREATE_FLAG_ROUTED) {
+		nircm->rule_flags |= NSS_IPV4_RULE_CREATE_FLAG_ROUTED;
+	}
+
+	/*
+	 * Set the flag NSS_IPV4_RULE_CREATE_FLAG_ICMP_NO_CME_FLUSH so that
+	 * rule is not flushed when NSS FW receives ICMP errors/packets.
+	 */
+	nircm->rule_flags |= NSS_IPV4_RULE_CREATE_FLAG_ICMP_NO_CME_FLUSH;
+
+	/*
+	 * Add any other additional flags which caller has requested.
+	 * For example: update MTU, update destination MAC address.
+	 */
+	nircm->rule_flags |= rule_flags;
+	nircm->valid_flags |= valid_flags;
+
+	down(&ip_response.sem);
+	status = nss_ipv4_tx(nss_ctx, &nim);
+	if (status != NSS_TX_SUCCESS) {
+		up(&ip_response.sem);
+		nss_capwapmgr_warn("%px: Create IPv4 message failed %d\n", ctx, status);
+		return status;
+	}
+
+	ip_response.cond = 1;
+	if (!wait_event_timeout(ip_response.wq, ip_response.cond == 0, 5 * HZ)) {
+		nss_capwapmgr_warn("%px: Create IPv4 command msg response timeout\n", ctx);
+		status = NSS_TX_FAILURE;
+	} else if (ip_response.response != NSS_CMN_RESPONSE_ACK) {
+		nss_capwapmgr_warn("%px: Create IPv4 command msg failed with response: %d\n", ctx, ip_response.response);
+		status = NSS_TX_FAILURE;
+	}
+
+	up(&ip_response.sem);
+	return status;
+}
+
+/*
+ * nss_capwapmgr_create_ipv6_rule()
+ *	Create a nss entry to accelerate the given connection
+ */
+static nss_tx_status_t nss_capwapmgr_create_ipv6_rule(void *ctx, struct nss_ipv6_create *unic, uint16_t rule_flags, uint16_t valid_flags)
+{
+	struct nss_ctx_instance *nss_ctx = (struct nss_ctx_instance *) ctx;
+	struct nss_ipv6_msg nim;
+	struct nss_ipv6_rule_create_msg *nircm;
+	nss_tx_status_t status;
+
+	nss_capwapmgr_info("%px: Create IPv6: %pI6:%d, %pI6:%d, p: %d\n", nss_ctx,
+		unic->src_ip, unic->src_port, unic->dest_ip, unic->dest_port, unic->protocol);
+
+	memset(&nim, 0, sizeof (struct nss_ipv6_msg));
+	nss_ipv6_msg_init(&nim, NSS_IPV6_RX_INTERFACE, NSS_IPV6_TX_CREATE_RULE_MSG,
+			sizeof(struct nss_ipv6_rule_create_msg), nss_capwapmgr_ipv6_handler, NULL);
+
+	nircm = &nim.msg.rule_create;
+
+	nircm->rule_flags = 0;
+	nircm->valid_flags = 0;
+
+	/*
+	 * Copy over the 5 tuple information.
+	 */
+	nircm->tuple.protocol = (uint8_t)unic->protocol;
+	nircm->tuple.flow_ip[0] = unic->src_ip[0];
+	nircm->tuple.flow_ip[1] = unic->src_ip[1];
+	nircm->tuple.flow_ip[2] = unic->src_ip[2];
+	nircm->tuple.flow_ip[3] = unic->src_ip[3];
+	nircm->tuple.flow_ident = (uint32_t)unic->src_port;
+
+	nircm->tuple.return_ip[0] = unic->dest_ip[0];
+	nircm->tuple.return_ip[1] = unic->dest_ip[1];
+	nircm->tuple.return_ip[2] = unic->dest_ip[2];
+	nircm->tuple.return_ip[3] = unic->dest_ip[3];
+	nircm->tuple.return_ident = (uint32_t)unic->dest_port;
+
+	/*
+	 * Copy over the connection rules and set CONN_VALID flag
+	 */
+	nircm->conn_rule.flow_interface_num = unic->src_interface_num;
+	nircm->conn_rule.flow_mtu = unic->from_mtu;
+	nircm->conn_rule.return_interface_num = unic->dest_interface_num;
+	nircm->conn_rule.return_mtu = unic->to_mtu;
+	memcpy(nircm->conn_rule.flow_mac, unic->src_mac, 6);
+	memcpy(nircm->conn_rule.return_mac, unic->dest_mac, 6);
+	nircm->valid_flags |= NSS_IPV6_RULE_CREATE_CONN_VALID;
+
+	/*
+	 * Copy over the DSCP rule parameters
+	 */
+	if (unic->flags & NSS_IPV6_CREATE_FLAG_DSCP_MARKING) {
+		nircm->dscp_rule.flow_dscp = unic->flow_dscp;
+		nircm->dscp_rule.return_dscp = unic->return_dscp;
+		nircm->rule_flags |= NSS_IPV6_RULE_CREATE_FLAG_DSCP_MARKING;
+		nircm->valid_flags |= NSS_IPV6_RULE_CREATE_DSCP_MARKING_VALID;
+	}
+
+	/*
+	 * Copy over the pppoe rules and set PPPOE_VALID flag
+	 */
+	nircm->pppoe_rule.flow_if_exist = unic->flow_pppoe_if_exist;
+	nircm->pppoe_rule.flow_if_num = unic->flow_pppoe_if_num;
+	nircm->pppoe_rule.return_if_exist = unic->return_pppoe_if_exist;
+	nircm->pppoe_rule.return_if_num = unic->return_pppoe_if_num;
+	nircm->valid_flags |= NSS_IPV6_RULE_CREATE_PPPOE_VALID;
+
+	/*
+	 * Copy over the tcp rules and set TCP_VALID flag
+	 */
+	nircm->tcp_rule.flow_window_scale = unic->flow_window_scale;
+	nircm->tcp_rule.flow_max_window = unic->flow_max_window;
+	nircm->tcp_rule.flow_end = unic->flow_end;
+	nircm->tcp_rule.flow_max_end = unic->flow_max_end;
+	nircm->tcp_rule.return_window_scale = unic->return_window_scale;
+	nircm->tcp_rule.return_max_window = unic->return_max_window;
+	nircm->tcp_rule.return_end = unic->return_end;
+	nircm->tcp_rule.return_max_end = unic->return_max_end;
+	nircm->valid_flags |= NSS_IPV6_RULE_CREATE_TCP_VALID;
+
+	/*
+	 * Copy over the vlan rules and set the VLAN_VALID flag
+	 */
+	nircm->vlan_primary_rule.egress_vlan_tag = unic->out_vlan_tag[0];
+	nircm->vlan_primary_rule.ingress_vlan_tag = unic->in_vlan_tag[0];
+	nircm->vlan_secondary_rule.egress_vlan_tag = unic->out_vlan_tag[1];
+	nircm->vlan_secondary_rule.ingress_vlan_tag = unic->in_vlan_tag[1];
+	nircm->valid_flags |= NSS_IPV6_RULE_CREATE_VLAN_VALID;
+
+	/*
+	 * Copy over the qos rules and set the QOS_VALID flag
+	 */
+	if (unic->flags & NSS_IPV6_CREATE_FLAG_QOS_VALID) {
+		nircm->qos_rule.flow_qos_tag = unic->flow_qos_tag;
+		nircm->qos_rule.return_qos_tag = unic->return_qos_tag;
+		nircm->valid_flags |= NSS_IPV6_RULE_CREATE_QOS_VALID;
+	}
+
+	if (unic->flags & NSS_IPV6_CREATE_FLAG_NO_SEQ_CHECK) {
+		nircm->rule_flags |= NSS_IPV6_RULE_CREATE_FLAG_NO_SEQ_CHECK;
+	}
+
+	if (unic->flags & NSS_IPV6_CREATE_FLAG_BRIDGE_FLOW) {
+		nircm->rule_flags |= NSS_IPV6_RULE_CREATE_FLAG_BRIDGE_FLOW;
+	}
+
+	if (unic->flags & NSS_IPV6_CREATE_FLAG_ROUTED) {
+		nircm->rule_flags |= NSS_IPV6_RULE_CREATE_FLAG_ROUTED;
+	}
+
+	/*
+	 * Set the flag NSS_IPV6_RULE_CREATE_FLAG_ICMP_NO_CME_FLUSH so that
+	 * rule is not flushed when NSS FW receives ICMP errors/packets.
+	 */
+	nircm->rule_flags |= NSS_IPV6_RULE_CREATE_FLAG_ICMP_NO_CME_FLUSH;
+
+	/*
+	 * Add any other additional flags which caller has requested.
+	 * For example: update MTU, Update destination MAC address.
+	 */
+	nircm->rule_flags |= rule_flags;
+	nircm->valid_flags |= valid_flags;
+
+	down(&ip_response.sem);
+	status = nss_ipv6_tx(nss_ctx, &nim);
+	if (status != NSS_TX_SUCCESS) {
+		up(&ip_response.sem);
+		nss_capwapmgr_warn("%px: Create IPv6 message failed %d\n", ctx, status);
+		return status;
+	}
+
+	ip_response.cond = 1;
+	if (!wait_event_timeout(ip_response.wq, ip_response.cond == 0, 5 * HZ)) {
+		nss_capwapmgr_warn("%px: Create IPv6 command msg response timeout\n", ctx);
+		status = NSS_TX_FAILURE;
+	} else if (ip_response.response != NSS_CMN_RESPONSE_ACK) {
+		nss_capwapmgr_warn("%px: Create IPv6 command msg failed with response: %d\n", ctx, ip_response.response);
+		status = NSS_TX_FAILURE;
+	}
+
+	up(&ip_response.sem);
+	return status;
+}
+
+/*
+ * nss_capwapmgr_configure_ipv4()
+ *	Internal function for configuring IPv4 connection
+ */
+static nss_tx_status_t nss_capwapmgr_configure_ipv4(struct nss_ipv4_create *pcreate, uint16_t rule_flags, uint16_t valid_flags)
+{
+	nss_tx_status_t status;
+	void *ctx;
+
+	ctx = nss_ipv4_get_mgr();
+	if (!ctx) {
+		nss_capwapmgr_warn("%s couldn't get IPv4 ctx\n", "CAPWAP");
+		return NSS_TX_FAILURE_NOT_READY;
+	}
+
+	status = nss_capwapmgr_create_ipv4_rule(ctx, pcreate, rule_flags, valid_flags);
+	if (status != NSS_TX_SUCCESS) {
+		nss_capwapmgr_warn("%px: ctx: nss_ipv4_tx() failed with %d\n", ctx, status);
+		return status;
+	}
+
+	return NSS_TX_SUCCESS;
+}
+
+/*
+ * nss_capwapmgr_configure_ipv6()
+ *	Internal function for configuring IPv6 connection
+ */
+static nss_tx_status_t nss_capwapmgr_configure_ipv6(struct nss_ipv6_create *pcreate, uint16_t rule_flags, uint16_t valid_flags)
+{
+	nss_tx_status_t status;
+	void *ctx;
+
+	ctx = nss_ipv6_get_mgr();
+	if (!ctx) {
+		nss_capwapmgr_warn("%s couldn't get IPv6 ctx\n", "CAPWAP");
+		return NSS_TX_FAILURE_NOT_READY;
+	}
+
+	status = nss_capwapmgr_create_ipv6_rule(ctx, pcreate, rule_flags, valid_flags);
+	if (status != NSS_TX_SUCCESS) {
+		nss_capwapmgr_warn("%px: ctx: nss_ipv6_tx() failed with %d\n", ctx, status);
+		return status;
+	}
+
+	return NSS_TX_SUCCESS;
+}
+
+/*
+ * nss_capwapmgr_tx_msg_sync()
+ *	Waits for message to return.
+ */
+static nss_capwapmgr_status_t nss_capwapmgr_tx_msg_sync(struct nss_ctx_instance *ctx, struct net_device *dev, struct nss_capwap_msg *msg)
+{
+	struct nss_capwapmgr_priv *priv;
+	struct nss_capwapmgr_response *r;
+	uint32_t if_num;
+	nss_capwapmgr_status_t status;
+
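+	/*
+	 * Responses are tracked per dynamic interface: index the per-netdev
+	 * response table by the interface's offset from the dynamic range,
+	 * and serialize commands with the per-response semaphore (one
+	 * outstanding message per tunnel).
+	 */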
+	if_num = msg->cm.interface - NSS_DYNAMIC_IF_START;
+	dev_hold(dev);
+	priv = netdev_priv(dev);
+	r = &priv->resp[if_num];
+	down(&r->sem);
+	r->response = NSS_CMN_RESPONSE_ACK;
+	atomic_set(&r->seq, 1);		/* Indicate that we are waiting */
+
+	/*
+	 * Call NSS driver
+	 */
+	status = nss_capwap_tx_msg(ctx, msg);
+	if (status != NSS_CAPWAPMGR_SUCCESS) {
+		up(&r->sem);
+		dev_put(dev);
+		return status;
+	}
+
+	if (!wait_event_timeout(r->wq, atomic_read(&r->seq) == 0, 5 * HZ)) {
+		atomic_set(&r->seq, 0);		/* Indicate that we are no longer waiting */
+		up(&r->sem);
+		nss_capwapmgr_warn("%px: CAPWAP command msg response timeout\n", ctx);
+		dev_put(dev);
+		return NSS_CAPWAPMGR_FAILURE_CMD_TIMEOUT;
+	}
+
+	/*
+	 * If NSS FW responded back with an error.
+	 */
+	if (r->response != NSS_CMN_RESPONSE_ACK) {
+		up(&r->sem);
+		nss_capwapmgr_warn("%px: CAPWAP command msg response : %d, error:%d\n", ctx,
+				r->response, r->error);
+		dev_put(dev);
+		return nss_capwap_remap_error(r->error);
+	}
+
+	up(&r->sem);
+	dev_put(dev);
+	return status;
+}
+
+/*
+ * nss_capwapmgr_create_capwap_rule()
+ *	Internal function to create a CAPWAP rule
+ */
+static nss_capwapmgr_status_t nss_capwapmgr_create_capwap_rule(struct net_device *dev,
+	uint32_t if_num, struct nss_capwap_rule_msg *msg, uint16_t type_flags)
+{
+	struct nss_ctx_instance *ctx = nss_capwap_get_ctx();
+	struct nss_capwap_msg capwapmsg;
+	struct nss_capwap_rule_msg *capwapcfg;
+	nss_capwapmgr_status_t status;
+
+	nss_capwapmgr_info("%px: ctx: CAPWAP Rule src_port: 0x%d dest_port:0x%d\n", ctx,
+	    ntohl(msg->encap.src_port), ntohl(msg->encap.dest_port));
+
+	/*
+	 * Verify CAPWAP rule parameters.
+	 */
+	if (ntohl(msg->decap.reassembly_timeout) > NSS_CAPWAP_MAX_REASSEMBLY_TIMEOUT) {
+		nss_capwapmgr_warn("%px: invalid reassem timeout: %d, max: %d\n",
+			ctx, ntohl(msg->decap.reassembly_timeout), NSS_CAPWAP_MAX_REASSEMBLY_TIMEOUT);
+		return NSS_CAPWAPMGR_FAILURE_BAD_PARAM;
+	}
+
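+	/*
+	 * Rule fields travel to the NSS FW in network byte order, hence the
+	 * ntohl() on reads and htonl() when applying defaults below.
+	 */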
+	if (msg->decap.reassembly_timeout == 0) {
+		msg->decap.reassembly_timeout = htonl(10);	/* 10 milliseconds */
+	}
+
+	if (ntohl(msg->decap.max_fragments) > NSS_CAPWAP_MAX_FRAGMENTS) {
+		nss_capwapmgr_warn("%px: invalid fragment setting: %d, max: %d\n",
+			ctx, ntohl(msg->decap.max_fragments), NSS_CAPWAP_MAX_FRAGMENTS);
+		return NSS_CAPWAPMGR_FAILURE_BAD_PARAM;
+	}
+
+	if (msg->decap.max_fragments == 0) {
+		msg->decap.max_fragments = htonl(NSS_CAPWAP_MAX_FRAGMENTS);
+	}
+
+	if (ntohl(msg->decap.max_buffer_size) > NSS_CAPWAP_MAX_BUFFER_SIZE) {
+		nss_capwapmgr_warn("%px: invalid buffer size: %d, max: %d\n",
+			ctx, ntohl(msg->decap.max_buffer_size), NSS_CAPWAP_MAX_BUFFER_SIZE);
+		return NSS_CAPWAPMGR_FAILURE_BAD_PARAM;
+	}
+
+	if (msg->decap.max_buffer_size == 0) {
+		msg->decap.max_buffer_size = htonl(nss_capwap_get_max_buf_size(ctx));
+	}
+
+	if (ntohl(msg->encap.path_mtu) > NSS_CAPWAP_MAX_MTU) {
+		nss_capwapmgr_warn("%px: invalid path_mtu: %d, max: %d\n",
+			ctx, ntohl(msg->encap.path_mtu), NSS_CAPWAP_MAX_MTU);
+		return NSS_CAPWAPMGR_FAILURE_BAD_PARAM;
+	}
+
+	if (msg->encap.path_mtu == 0) {
+		msg->encap.path_mtu = htonl(NSS_CAPWAPMGR_NORMAL_FRAME_MTU);
+	}
+
+	msg->type_flags = type_flags;
+
+	/*
+	 * Prepare the tunnel configuration parameter to send to NSS FW
+	 */
+	memset(&capwapmsg, 0, sizeof(struct nss_capwap_msg));
+	capwapcfg = &capwapmsg.msg.rule;
+	memcpy(capwapcfg, msg, sizeof(struct nss_capwap_rule_msg));
+
+	/*
+	 * Send CAPWAP tunnel create command to NSS
+	 */
+	nss_capwap_msg_init(&capwapmsg, if_num, NSS_CAPWAP_MSG_TYPE_CFG_RULE,
+			sizeof(struct nss_capwap_rule_msg),
+			nss_capwapmgr_msg_event_receive, dev);
+
+	status = nss_capwapmgr_tx_msg_sync(ctx, dev, &capwapmsg);
+	if (status != NSS_CAPWAPMGR_SUCCESS) {
+		nss_capwapmgr_warn("%px: ctx: create encap data tunnel error %d \n", ctx, status);
+		return status;
+	}
+
+	return NSS_CAPWAPMGR_SUCCESS;
+}
+
+/*
+ * nss_capwapmgr_tx_msg_enable_tunnel()
+ *	Common function to send CAPWAP tunnel enable msg
+ */
+static nss_tx_status_t nss_capwapmgr_tx_msg_enable_tunnel(struct nss_ctx_instance *ctx, struct net_device *dev, uint32_t if_num, uint32_t sibling_if_num)
+{
+	struct nss_capwap_msg capwapmsg;
+	nss_tx_status_t status;
+
+	/*
+	 * Prepare the tunnel configuration parameter to send to NSS FW
+	 */
+	memset(&capwapmsg, 0, sizeof(struct nss_capwap_msg));
+	capwapmsg.msg.enable_tunnel.sibling_if_num = sibling_if_num;
+
+	/*
+	 * Send CAPWAP data tunnel command to NSS
+	 */
+	nss_capwap_msg_init(&capwapmsg, if_num, NSS_CAPWAP_MSG_TYPE_ENABLE_TUNNEL, sizeof(struct nss_capwap_enable_tunnel_msg), nss_capwapmgr_msg_event_receive, dev);
+
+	status = nss_capwapmgr_tx_msg_sync(ctx, dev, &capwapmsg);
+	if (status != NSS_TX_SUCCESS) {
+		nss_capwapmgr_warn("%px: ctx: CMD: %d Tunnel error : %d \n", ctx, NSS_CAPWAP_MSG_TYPE_ENABLE_TUNNEL, status);
+	}
+
+	return status;
+}
+
+/*
+ * nss_capwapmgr_tunnel_action()
+ *	Common function for CAPWAP tunnel operation messages without
+ *	any message data structures.
+ */
+static nss_tx_status_t nss_capwapmgr_tunnel_action(struct nss_ctx_instance *ctx, struct net_device *dev, uint32_t if_num, nss_capwap_msg_type_t cmd)
+{
+	struct nss_capwap_msg capwapmsg;
+	nss_tx_status_t status;
+
+	/*
+	 * Prepare the tunnel configuration parameter to send to NSS FW
+	 */
+	memset(&capwapmsg, 0, sizeof(struct nss_capwap_msg));
+
+	/*
+	 * Send CAPWAP data tunnel command to NSS
+	 */
+	nss_capwap_msg_init(&capwapmsg, if_num, cmd, 0, nss_capwapmgr_msg_event_receive, dev);
+
+	status = nss_capwapmgr_tx_msg_sync(ctx, dev, &capwapmsg);
+	if (status != NSS_TX_SUCCESS) {
+		nss_capwapmgr_warn("%px: ctx: CMD: %d Tunnel error : %d \n", ctx, cmd, status);
+	}
+
+	return status;
+}
+
+/*
+ * nss_capwapmgr_get_dtls_netdev()
+ *	API for getting the dtls netdev associated with the capwap tunnel
+ *
+ * The caller is expected to do a dev_put() to release the reference.
+ */
+struct net_device *nss_capwapmgr_get_dtls_netdev(struct net_device *capwap_dev, uint8_t tunnel_id)
+{
+	struct nss_capwapmgr_tunnel *t;
+	struct net_device *dtls_dev;
+
+	dev_hold(capwap_dev);
+	t = nss_capwapmgr_verify_tunnel_param(capwap_dev, tunnel_id);
+	if (!t) {
+		nss_capwapmgr_warn("%px: can't find tunnel: %d\n", capwap_dev, tunnel_id);
+		dev_put(capwap_dev);
+		return NULL;
+	}
+
+	dtls_dev = t->dtls_dev;
+	dev_hold(dtls_dev);
+
+	dev_put(capwap_dev);
+
+	return dtls_dev;
+}
+EXPORT_SYMBOL(nss_capwapmgr_get_dtls_netdev);
+
+/*
+ * nss_capwapmgr_update_path_mtu()
+ *	API for updating Path MTU
+ */
+nss_capwapmgr_status_t nss_capwapmgr_update_path_mtu(struct net_device *dev, uint8_t tunnel_id, uint32_t mtu)
+{
+	struct nss_capwapmgr_priv *priv;
+	struct nss_capwap_msg capwapmsg;
+	struct nss_capwapmgr_tunnel *t;
+	nss_capwapmgr_status_t status;
+	nss_tx_status_t nss_status;
+
+	if (mtu > NSS_CAPWAP_MAX_MTU) {
+		nss_capwapmgr_warn("%px: invalid path_mtu: %d, max: %d\n", dev, mtu, NSS_CAPWAP_MAX_MTU);
+		return NSS_CAPWAPMGR_FAILURE_BAD_PARAM;
+	}
+
+	dev_hold(dev);
+	t = nss_capwapmgr_verify_tunnel_param(dev, tunnel_id);
+	if (!t) {
+		nss_capwapmgr_warn("%px: can't find tunnel: %d\n", dev, tunnel_id);
+		dev_put(dev);
+		return NSS_CAPWAPMGR_FAILURE_BAD_PARAM;
+	}
+
+	priv = netdev_priv(dev);
+	nss_capwapmgr_info("%px: %d: tunnel update MTU is being called\n", dev, t->if_num_inner);
+
+	/*
+	 * Prepare the tunnel configuration parameter to send to NSS FW
+	 */
+	memset(&capwapmsg, 0, sizeof(struct nss_capwap_msg));
+
+	/*
+	 * Send CAPWAP data tunnel command to NSS
+	 */
+	nss_capwap_msg_init(&capwapmsg, t->if_num_inner, NSS_CAPWAP_MSG_TYPE_UPDATE_PATH_MTU,
+		sizeof(struct nss_capwap_path_mtu_msg), nss_capwapmgr_msg_event_receive, dev);
+	capwapmsg.msg.mtu.path_mtu = htonl(mtu);
+	status = nss_capwapmgr_tx_msg_sync(priv->nss_ctx, dev, &capwapmsg);
+	if (status != NSS_CAPWAPMGR_SUCCESS) {
+		nss_capwapmgr_warn("%px: Update Path MTU CAPWAP tunnel error : %d \n", dev, status);
+		dev_put(dev);
+		return NSS_CAPWAPMGR_FAILURE_CAPWAP_RULE;
+	}
+
+	/*
+	 * Update the IPv4/IPv6 rule with the new MTU for flow and return
+	 * TODO: Change rule flag to valid flag
+	 */
+	if (t->capwap_rule.l3_proto == NSS_CAPWAP_TUNNEL_IPV4) {
+		struct nss_ipv4_create *v4;
+
+		v4 = &t->ip_rule.v4;
+		v4->from_mtu = v4->to_mtu = mtu;
+		nss_status = nss_capwapmgr_configure_ipv4(v4, NSS_IPV4_RULE_UPDATE_FLAG_CHANGE_MTU, 0);
+		if (nss_status != NSS_TX_SUCCESS) {
+			v4->from_mtu = v4->to_mtu = ntohl(t->capwap_rule.encap.path_mtu);
+		}
+	} else {
+		struct nss_ipv6_create *v6;
+
+		v6 = &t->ip_rule.v6;
+		v6->from_mtu = v6->to_mtu = mtu;
+		nss_status = nss_capwapmgr_configure_ipv6(v6, NSS_IPV6_RULE_UPDATE_FLAG_CHANGE_MTU, 0);
+		if (nss_status != NSS_TX_SUCCESS) {
+			v6->from_mtu = v6->to_mtu = ntohl(t->capwap_rule.encap.path_mtu);
+		}
+	}
+
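+	/*
+	 * If the IP rule update failed, restore the previous path MTU in the
+	 * CAPWAP rule so the FW and the cached IP rule stay consistent.
+	 */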
+	if (nss_status != NSS_TX_SUCCESS) {
+		nss_capwapmgr_warn("%px: Update Path MTU IP RULE tunnel error : %d \n", dev, nss_status);
+		capwapmsg.msg.mtu.path_mtu = t->capwap_rule.encap.path_mtu;
+		status = nss_capwapmgr_tx_msg_sync(priv->nss_ctx, dev, &capwapmsg);
+		if (status != NSS_CAPWAPMGR_SUCCESS) {
+			nss_capwapmgr_warn("%px: Restore Path MTU CAPWAP tunnel error : %d \n", dev, status);
+		}
+
+		dev_put(dev);
+		return NSS_CAPWAPMGR_FAILURE_IP_RULE;
+	}
+
+	t->capwap_rule.encap.path_mtu = htonl(mtu);
+	dev_put(dev);
+	return status;
+}
+EXPORT_SYMBOL(nss_capwapmgr_update_path_mtu);
+
+/*
+ * nss_capwapmgr_update_dest_mac_addr()
+ *	API for updating Destination Mac Addr
+ */
+nss_capwapmgr_status_t nss_capwapmgr_update_dest_mac_addr(struct net_device *dev, uint8_t tunnel_id, uint8_t *mac_addr)
+{
+	struct nss_capwapmgr_priv *priv;
+	struct nss_capwapmgr_tunnel *t;
+	nss_tx_status_t nss_status;
+	nss_capwapmgr_status_t status = NSS_CAPWAPMGR_SUCCESS;
+	struct nss_ipv6_create *v6;
+	uint8_t mac_addr_old[ETH_ALEN];
+
+	dev_hold(dev);
+	t = nss_capwapmgr_verify_tunnel_param(dev, tunnel_id);
+	if (!t) {
+		nss_capwapmgr_warn("%px: can't find tunnel: %d\n", dev, tunnel_id);
+		status = NSS_CAPWAPMGR_FAILURE_BAD_PARAM;
+		goto done;
+	}
+
+	priv = netdev_priv(dev);
+	nss_capwapmgr_info("%px: %d: tunnel update mac Addr is being called\n", dev, tunnel_id);
+
+	/*
+	 * Update the IPv4/IPv6 rule with the new destination mac address for flow and return.
+	 * Since the encap direction is handled by the return rule, we are updating the src_mac.
+	 */
+	if (t->capwap_rule.l3_proto == NSS_CAPWAP_TUNNEL_IPV4) {
+		struct nss_ipv4_create *v4;
+
+		v4 = &t->ip_rule.v4;
+		memcpy(mac_addr_old, v4->src_mac, ETH_ALEN);
+		memcpy(v4->src_mac, mac_addr, ETH_ALEN);
+		nss_status = nss_capwapmgr_configure_ipv4(v4, 0, NSS_IPV4_RULE_CREATE_DEST_MAC_VALID);
+
+		if (nss_status != NSS_TX_SUCCESS) {
+			nss_capwapmgr_warn("%px: Update Destination Mac for tunnel error : %d \n", dev, nss_status);
+			memcpy(t->ip_rule.v4.src_mac, mac_addr_old, ETH_ALEN);
+			status = NSS_CAPWAPMGR_FAILURE_IP_RULE;
+		}
+
+		goto done;
+	}
+
+	v6 = &t->ip_rule.v6;
+	memcpy(mac_addr_old, v6->src_mac, ETH_ALEN);
+	memcpy(v6->src_mac, mac_addr, ETH_ALEN);
+	nss_status = nss_capwapmgr_configure_ipv6(v6, 0, NSS_IPV6_RULE_CREATE_DEST_MAC_VALID);
+
+	if (nss_status != NSS_TX_SUCCESS) {
+		nss_capwapmgr_warn("%px: Update Destination Mac for tunnel error : %d \n", dev, nss_status);
+		memcpy(t->ip_rule.v6.src_mac, mac_addr_old, ETH_ALEN);
+		status = NSS_CAPWAPMGR_FAILURE_IP_RULE;
+	}
+
+done:
+	dev_put(dev);
+	return status;
+}
+EXPORT_SYMBOL(nss_capwapmgr_update_dest_mac_addr);
+
+/*
+ * nss_capwapmgr_update_src_interface()
+ *	API for updating Source Interface
+ */
+nss_capwapmgr_status_t nss_capwapmgr_update_src_interface(struct net_device *dev, uint8_t tunnel_id, uint32_t src_interface_num)
+{
+	struct nss_capwapmgr_priv *priv;
+	struct nss_capwapmgr_tunnel *t;
+	nss_tx_status_t nss_status;
+	uint32_t outer_trustsec_enabled, dtls_enabled, forward_if_num, src_interface_num_temp;
+
+	dev_hold(dev);
+	t = nss_capwapmgr_verify_tunnel_param(dev, tunnel_id);
+	if (!t) {
+		nss_capwapmgr_warn("%px: can't find tunnel: %d\n", dev, tunnel_id);
+		dev_put(dev);
+		return NSS_CAPWAPMGR_FAILURE_BAD_PARAM;
+	}
+
+	priv = netdev_priv(dev);
+	nss_capwapmgr_info("%px: %d: tunnel update source interface is being called\n", dev, tunnel_id);
+	outer_trustsec_enabled = t->capwap_rule.enabled_features & NSS_CAPWAPMGR_FEATURE_OUTER_TRUSTSEC_ENABLED;
+	dtls_enabled = t->capwap_rule.enabled_features & NSS_CAPWAPMGR_FEATURE_DTLS_ENABLED;
+
+	/*
+	 * If trustsec is enabled, just update the next node of trustsec.
+	 */
+	if (outer_trustsec_enabled) {
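+		/*
+		 * The trustsec next hop is the DTLS outer interface when DTLS
+		 * is enabled, otherwise the CAPWAP outer interface.
+		 */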
+		if (!dtls_enabled) {
+			forward_if_num = nss_capwap_ifnum_with_core_id(t->if_num_outer);
+		} else {
+			forward_if_num = nss_dtlsmgr_get_interface(t->dtls_dev, NSS_DTLSMGR_INTERFACE_TYPE_OUTER);
+		}
+
+		nss_status = nss_trustsec_tx_update_nexthop(forward_if_num, src_interface_num, t->capwap_rule.outer_sgt_value);
+		if (nss_status != NSS_TX_SUCCESS) {
+			nss_capwapmgr_warn("%px: unconfigure trustsec_tx failed\n", dev);
+			dev_put(dev);
+			return NSS_CAPWAPMGR_FAILURE_UNCONFIGURE_TRUSTSEC_TX;
+		}
+
+		if (t->capwap_rule.l3_proto == NSS_CAPWAP_TUNNEL_IPV4) {
+			t->ip_rule.v4.src_interface_num = src_interface_num;
+		} else {
+			t->ip_rule.v6.src_interface_num = src_interface_num;
+		}
+		dev_put(dev);
+		return NSS_CAPWAPMGR_SUCCESS;
+	}
+
+	/*
+	 * Destroy/Re-Create the IPv4/IPv6 rule with the new Interface number for flow and return
+	 */
+	if (t->capwap_rule.l3_proto == NSS_CAPWAP_TUNNEL_IPV4) {
+
+		/*
		 * Destroy the IP rule only if it already exists.
+		 */
+		if (t->tunnel_state & NSS_CAPWAPMGR_TUNNEL_STATE_IPRULE_CONFIGURED) {
+			struct nss_ipv4_destroy v4_destroy;
+			v4_destroy.protocol = IPPROTO_UDP;
+			v4_destroy.src_ip = t->ip_rule.v4.src_ip;
+			v4_destroy.dest_ip = t->ip_rule.v4.dest_ip;
+			v4_destroy.src_port = t->ip_rule.v4.src_port;
+			v4_destroy.dest_port = t->ip_rule.v4.dest_port;
+			nss_status = nss_capwapmgr_unconfigure_ipv4_rule(&v4_destroy);
+			if (nss_status != NSS_TX_SUCCESS) {
+				nss_capwapmgr_warn("%px: unconfigure ipv4 rule failed : %d\n", dev, nss_status);
+				dev_put(dev);
+				return NSS_CAPWAPMGR_FAILURE_IP_DESTROY_RULE;
+			}
+
+			t->tunnel_state &= ~NSS_CAPWAPMGR_TUNNEL_STATE_IPRULE_CONFIGURED;
+		}
+
+		src_interface_num_temp = t->ip_rule.v4.src_interface_num;
+		t->ip_rule.v4.src_interface_num = src_interface_num;
+		nss_capwapmgr_configure_ipv4(&t->ip_rule.v4, 0, 0);
+		if (nss_status != NSS_TX_SUCCESS) {
+			nss_capwapmgr_warn("%px: configure ipv4 rule failed : %d\n", dev, nss_status);
+			t->ip_rule.v4.src_interface_num = src_interface_num_temp;
+			dev_put(dev);
+			return NSS_CAPWAPMGR_FAILURE_IP_RULE;
+		}
+	} else {
+		/*
		 * Destroy the IP rule only if it already exists.
+		 */
+		if (t->tunnel_state & NSS_CAPWAPMGR_TUNNEL_STATE_IPRULE_CONFIGURED) {
+			struct nss_ipv6_destroy v6_destroy;
+
+			if (t->capwap_rule.which_udp == NSS_CAPWAP_TUNNEL_UDP) {
+				v6_destroy.protocol = IPPROTO_UDP;
+			} else {
+				v6_destroy.protocol = IPPROTO_UDPLITE;
+			}
+
+			v6_destroy.src_ip[0] = t->ip_rule.v6.src_ip[0];
+			v6_destroy.src_ip[1] = t->ip_rule.v6.src_ip[1];
+			v6_destroy.src_ip[2] = t->ip_rule.v6.src_ip[2];
+			v6_destroy.src_ip[3] = t->ip_rule.v6.src_ip[3];
+
+			v6_destroy.dest_ip[0] = t->ip_rule.v6.dest_ip[0];
+			v6_destroy.dest_ip[1] = t->ip_rule.v6.dest_ip[1];
+			v6_destroy.dest_ip[2] = t->ip_rule.v6.dest_ip[2];
+			v6_destroy.dest_ip[3] = t->ip_rule.v6.dest_ip[3];
+
+			v6_destroy.src_port = t->ip_rule.v6.src_port;
+			v6_destroy.dest_port = t->ip_rule.v6.dest_port;
+			nss_status = nss_capwapmgr_unconfigure_ipv6_rule(&v6_destroy);
+			if (nss_status != NSS_TX_SUCCESS) {
+				nss_capwapmgr_warn("%px: unconfigure ipv6 rule failed : %d\n", dev, nss_status);
+				dev_put(dev);
+				return NSS_CAPWAPMGR_FAILURE_IP_DESTROY_RULE;
+			}
+
+			t->tunnel_state &= ~NSS_CAPWAPMGR_TUNNEL_STATE_IPRULE_CONFIGURED;
+		}
+
+		src_interface_num_temp = t->ip_rule.v6.src_interface_num;
+		t->ip_rule.v6.src_interface_num = src_interface_num;
+		nss_capwapmgr_configure_ipv6(&t->ip_rule.v6, 0, 0);
+		if (nss_status != NSS_TX_SUCCESS) {
+			nss_capwapmgr_warn("%px: configure ipv6 rule failed : %d\n", dev, nss_status);
+			t->ip_rule.v6.src_interface_num = src_interface_num_temp;
+			dev_put(dev);
+			return NSS_CAPWAPMGR_FAILURE_IP_RULE;
+		}
+	}
+	t->tunnel_state |= NSS_CAPWAPMGR_TUNNEL_STATE_IPRULE_CONFIGURED;
+	dev_put(dev);
+	return NSS_CAPWAPMGR_SUCCESS;
+}
+EXPORT_SYMBOL(nss_capwapmgr_update_src_interface);
+
+/*
+ * nss_capwapmgr_dscp_rule_destroy()
+ *	API to destroy previously created DSCP rule.
+ */
+nss_capwapmgr_status_t nss_capwapmgr_dscp_rule_destroy(uint8_t id)
+{
+	sw_error_t rv;
+	fal_qos_cosmap_t cosmap;
+	struct nss_capwapmgr_acl *acl_rule;
+	uint8_t dev_id = NSS_CAPWAPMGR_DEV_ID;
+	uint8_t rule_nr = NSS_CAPWAPMGR_RULE_NR;
+	uint8_t group_id = NSS_CAPWAPMGR_GROUP_ID;
+	uint8_t i, j, list_id, v4_rule_id, v6_rule_id, dscp_value, dscp_mask;
+
+	for (i = 0; i < NSS_CAPWAPMGR_ACL_LIST_CNT; i++) {
+		for (j = 0; j < NSS_CAPWAPMGR_ACL_RULES_PER_LIST; j++) {
+			if (global.acl_list[i].rule[j].uid == id) {
+				acl_rule = &global.acl_list[i].rule[j];
+				goto found;
+			}
+		}
+	}
+
+	nss_capwapmgr_warn("Invalid id: %u\n", id);
+	return NSS_CAPWAPMGR_FAILURE_DSCP_RULE_ID_INVALID;
+
+found:
+	if (!acl_rule->in_use) {
+		nss_capwapmgr_warn("Rule matching id: %d not in use\n", id);
+		return NSS_CAPWAPMGR_FAILURE_DSCP_RULE_ID_NOT_IN_USE;
+	}
+
+	dscp_value = acl_rule->dscp_value;
+	dscp_mask = acl_rule->dscp_mask;
+
+	/*
+	 * Reset all classification fields on cosmap table.
+	 */
+	cosmap.internal_pcp = 0;
+	cosmap.internal_dei = 0;
+	cosmap.internal_pri = 0;
+	cosmap.internal_dscp = 0;
+	cosmap.internal_dp = 0;
+
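+	/*
+	 * Walk all DSCP codepoints and reset only those matching this rule's
+	 * value under its mask.
+	 */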
+	for (i = 0; i < NSS_CAPWAPMGR_DSCP_MAX; i++) {
+		if ((i & dscp_mask) != dscp_value) {
+			continue;
+		}
+
+		nss_capwapmgr_trace("dscpmap: resetting for dscp %u\n", i);
+		rv = fal_qos_cosmap_dscp_set(dev_id, group_id, i, &cosmap);
+		if (rv != SW_OK) {
+			nss_capwapmgr_warn("Failed to reset cosmap for dscp %d - code: %d\n", i, rv);
+			return NSS_CAPWAPMGR_FAILURE_DSCP_RULE_DELETE_FAILED;
+		}
+	}
+
+	/*
+	 * Since we use 2 ACL entries per rule (i.e. v4/v6) we multiply by
+	 * two to get rule_ids.
+	 */
+	v4_rule_id = acl_rule->rule_id * 2;
+	v6_rule_id = v4_rule_id + 1;
+	list_id = NSS_CAPWAPMGR_ACL_LIST_START + acl_rule->list_id;
+
+	rv = fal_acl_rule_delete(dev_id, list_id, v6_rule_id, rule_nr);
+	if (rv != SW_OK) {
+		nss_capwapmgr_warn("Failed to del ACL v6_rule %d from list %d - code: %d\n", v6_rule_id, list_id, rv);
+		return NSS_CAPWAPMGR_FAILURE_DSCP_RULE_DELETE_FAILED;
+	}
+
+	rv = fal_acl_rule_delete(dev_id, list_id, v4_rule_id, rule_nr);
+	if (rv != SW_OK) {
+		nss_capwapmgr_warn("Failed to del ACL v4_rule %d from list %d - code: %d\n", v4_rule_id, list_id, rv);
+		return NSS_CAPWAPMGR_FAILURE_DSCP_RULE_DELETE_FAILED;
+	}
+
+	acl_rule->in_use = false;
+	return NSS_CAPWAPMGR_SUCCESS;
+}
+EXPORT_SYMBOL(nss_capwapmgr_dscp_rule_destroy);
+
+/*
+ * nss_capwapmgr_dscp_rule_create()
+ *	API to prioritize packets based on DSCP.
+ */
+nss_capwapmgr_status_t nss_capwapmgr_dscp_rule_create(uint8_t dscp_value, uint8_t dscp_mask, uint8_t pri, uint8_t *id)
+{
+	sw_error_t rv;
+	fal_qos_cosmap_t cosmap;
+	fal_qos_cosmap_t *orig_cosmap;
+	fal_acl_rule_t *acl_rule;
+	uint8_t dev_id = NSS_CAPWAPMGR_DEV_ID;
+	uint8_t group_id = NSS_CAPWAPMGR_GROUP_ID;
+	uint8_t rule_nr = NSS_CAPWAPMGR_RULE_NR;
+	uint8_t list_id, v4_rule_id, v6_rule_id;
+	uint8_t lid, rid, i, j;
+	int8_t err, fail_dscp;
+	int8_t uid = -1;
+
+	nss_capwapmgr_info("Setting priority %u for dscp %u mask %u\n", pri, dscp_value, dscp_mask);
+
+	orig_cosmap = kzalloc(NSS_CAPWAPMGR_DSCP_MAX * sizeof(*orig_cosmap), GFP_KERNEL);
+	if (!orig_cosmap) {
+		nss_capwapmgr_warn("Failed to alloc memory for orig_cosmap\n");
+		return NSS_CAPWAPMGR_FAILURE_MEM_UNAVAILABLE;
+	}
+
+	acl_rule = kzalloc(sizeof(*acl_rule), GFP_KERNEL);
+	if (!acl_rule) {
+		nss_capwapmgr_warn("Failed to alloc memory for acl_rule\n");
+		kfree(orig_cosmap);
+		return NSS_CAPWAPMGR_FAILURE_MEM_UNAVAILABLE;
+	}
+
+	/*
+	 * Get an empty acl rule.
+	 */
+	for (i = 0; i < NSS_CAPWAPMGR_ACL_LIST_CNT; i++) {
+		for (j = 0; j < NSS_CAPWAPMGR_ACL_RULES_PER_LIST; j++) {
+			if (global.acl_list[i].rule[j].in_use) {
+				continue;
+			}
+
+			uid = global.acl_list[i].rule[j].uid;
+			rid = global.acl_list[i].rule[j].rule_id;
+			lid = global.acl_list[i].rule[j].list_id;
+			goto found;
+		}
+	}
+
+found:
+	if (uid < 0) {
+		nss_capwapmgr_warn("No free ACL rules available\n");
+		err = NSS_CAPWAPMGR_FAILURE_ACL_UNAVAILABLE;
+		goto fail1;
+	}
+
+	/*
+	 * Since we use 2 ACL entries per rule (i.e. v4/v6) we multiply rid by
+	 * two to get rule_id.
+	 */
+	v4_rule_id = rid * 2;
+	v6_rule_id = v4_rule_id + 1;
+	list_id = NSS_CAPWAPMGR_ACL_LIST_START + lid;
+
+	nss_capwapmgr_info("Using ACL rules: %d & %d from list: %d\n", v4_rule_id, v6_rule_id, list_id);
+
+	/*
+	 * Prioritize packets carrying the given dscp value. For trustsec packets
+	 * we need to tell the ACL engine where the dscp value is located.
+	 * ACL rules always start matching from the L2 header, which in our case
+	 * is the trustsec header, so two user-defined profiles are needed:
+	 * Profile 0 marks the start of the ethernet type.
+	 * Profile 1 marks the start of the ip header.
+	 */
+	rv = fal_acl_udf_profile_set(dev_id, FAL_ACL_UDF_NON_IP, 0, FAL_ACL_UDF_TYPE_L3, NSS_CAPWAPMGR_ETH_HDR_OFFSET);
+	if (rv != SW_OK) {
+		nss_capwapmgr_warn("Failed to create UDF 0 Map - code: %d\n", rv);
+		err = NSS_CAPWAPMGR_FAILURE_CREATE_UDF_PROFILE;
+		goto fail1;
+	}
+
+	rv = fal_acl_udf_profile_set(dev_id, FAL_ACL_UDF_NON_IP, 1, FAL_ACL_UDF_TYPE_L3, NSS_CAPWAPMGR_IPV4_OFFSET);
+	if (rv != SW_OK) {
+		nss_capwapmgr_warn("Failed to create UDF 1 Map - code: %d\n", rv);
+		err = NSS_CAPWAPMGR_FAILURE_CREATE_UDF_PROFILE;
+		goto fail1;
+	}
+
+	acl_rule->rule_type = FAL_ACL_RULE_MAC;
+
+	/*
+	 * Sets valid flags for the acl rule.
+	 * Following rules are valid:
+	 * - Ethernet type
+	 * - User defined field 0. Correspond to ethernet type (ipv4/ipv6)
+	 * - User defined field 1. Correspond to DSCP value (dscp_value)
+	 */
+	FAL_FIELD_FLG_SET(acl_rule->field_flg, FAL_ACL_FIELD_MAC_ETHTYPE);
+	FAL_FIELD_FLG_SET(acl_rule->field_flg, FAL_ACL_FIELD_UDF0);
+	FAL_FIELD_FLG_SET(acl_rule->field_flg, FAL_ACL_FIELD_UDF1);
+	FAL_ACTION_FLG_SET(acl_rule->action_flg, FAL_ACL_ACTION_PERMIT);
+	FAL_ACTION_FLG_SET(acl_rule->action_flg, FAL_ACL_ACTION_ENQUEUE_PRI);
+
+	/*
+	 * Set common parameters for ipv4/ipv6
+	 */
+	acl_rule->ethtype_val = NSS_CAPWAPMGR_ETH_TYPE_TRUSTSEC;
+	acl_rule->ethtype_mask = NSS_CAPWAPMGR_ETH_TYPE_MASK;
+	acl_rule->udf0_op = FAL_ACL_FIELD_MASK;
+	acl_rule->udf1_op = FAL_ACL_FIELD_MASK;
+	acl_rule->enqueue_pri = pri;
+
+	/*
+	 * Create ACL rule for IPv4
+	 */
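+	/*
+	 * udf1 is matched against a window at the IP header (profile 1 above);
+	 * the shift places the dscp field at its position within that window.
+	 */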
+	acl_rule->udf0_val = NSS_CAPWAPMGR_ETH_TYPE_IPV4;
+	acl_rule->udf0_mask = NSS_CAPWAPMGR_ETH_TYPE_MASK;
+	acl_rule->udf1_val = dscp_value << NSS_CAPWAPMGR_DSCP_MASK_IPV4_SHIFT;
+	acl_rule->udf1_mask = dscp_mask << NSS_CAPWAPMGR_DSCP_MASK_IPV4_SHIFT;
+
+	rv = fal_acl_rule_query(dev_id, list_id, v4_rule_id, acl_rule);
+	if (rv != SW_NOT_FOUND) {
+		nss_capwapmgr_warn("ACL rule already exist for list_id: %u, rule_id: %u - code: %d\n", list_id, v4_rule_id, rv);
+		err = NSS_CAPWAPMGR_FAILURE_ACL_RULE_ALREADY_EXIST;
+		goto fail1;
+	}
+
+	rv = fal_acl_list_unbind(dev_id, list_id, FAL_ACL_DIREC_IN, FAL_ACL_BIND_PORTBITMAP, NSS_CAPWAPMGR_BIND_BITMAP);
+	if (rv != SW_OK) {
+		nss_capwapmgr_warn("Failed to unbind list: %d - code: %d\n", list_id, rv);
+		err = NSS_CAPWAPMGR_FAILURE_ADD_ACL_RULE;
+		goto fail1;
+	}
+
+	rv = fal_acl_rule_add(dev_id, list_id, v4_rule_id, rule_nr, acl_rule);
+	if (rv != SW_OK) {
+		nss_capwapmgr_warn("Failed to add ACL v4_rule: %d - code: %d\n", rv, v4_rule_id);
+		err = NSS_CAPWAPMGR_FAILURE_ADD_ACL_RULE;
+		goto fail1;
+	}
+
+	/*
+	 * Create ACL rule for IPv6
+	 */
+	acl_rule->udf0_val = NSS_CAPWAPMGR_ETH_TYPE_IPV6;
+	acl_rule->udf0_mask = NSS_CAPWAPMGR_ETH_TYPE_MASK;
+	acl_rule->udf1_val = dscp_value << NSS_CAPWAPMGR_DSCP_MASK_IPV6_SHIFT;
+	acl_rule->udf1_mask = dscp_mask << NSS_CAPWAPMGR_DSCP_MASK_IPV6_SHIFT;
+
+	rv = fal_acl_rule_add(dev_id, list_id, v6_rule_id, rule_nr, acl_rule);
+	if (rv != SW_OK) {
+		nss_capwapmgr_warn("Failed to add ACL v6_rule: %d - code: %d\n", rv, v6_rule_id);
+		err = NSS_CAPWAPMGR_FAILURE_ADD_ACL_RULE;
+		goto fail2;
+	}
+
+	/*
+	 * Bind list to all ethernet ports
+	 */
+	rv = fal_acl_list_bind(dev_id, list_id, FAL_ACL_DIREC_IN, FAL_ACL_BIND_PORTBITMAP, NSS_CAPWAPMGR_BIND_BITMAP);
+	if (rv != SW_OK) {
+		nss_capwapmgr_warn("Failed to bind ACL list: %d - code: %d\n", list_id, rv);
+		err = NSS_CAPWAPMGR_FAILURE_BIND_ACL_LIST;
+		goto fail3;
+	}
+
+	/*
+	 * Set ACL as in_use and save dscp value and mask.
+	 */
+	global.acl_list[lid].rule[rid].in_use = true;
+	global.acl_list[lid].rule[rid].dscp_value = dscp_value;
+	global.acl_list[lid].rule[rid].dscp_mask = dscp_mask;
+
+	/*
+	 * Prioritize non-trustsec packets whose dscp matches dscp_value.
+	 * These packets do not require any ACL rule.
+	 */
+	cosmap.internal_pcp = 0;
+	cosmap.internal_dei = 0;
+	cosmap.internal_pri = pri;
+	cosmap.internal_dscp = 0;
+	cosmap.internal_dp = 0;
+	for (i = 0; i < NSS_CAPWAPMGR_DSCP_MAX; i++) {
+		if ((i & dscp_mask) != dscp_value) {
+			continue;
+		}
+
+		rv = fal_qos_cosmap_dscp_get(dev_id, group_id, i, &orig_cosmap[i]);
+		if (rv != SW_OK) {
+			nss_capwapmgr_warn("dscpmap: failed to get cosmap for dscp %d\n", i);
+			err = NSS_CAPWAPMGR_FAILURE_CONFIGURE_DSCP_MAP;
+			goto fail4;
+		}
+
+		nss_capwapmgr_trace("dscpmap: setting priority %u for dscp %u\n", pri, i);
+		rv = fal_qos_cosmap_dscp_set(dev_id, group_id, i, &cosmap);
+		if (rv != SW_OK) {
+			nss_capwapmgr_warn("Failed to configure cosmap for dscp %d - code: %d\n", i, rv);
+			err = NSS_CAPWAPMGR_FAILURE_CONFIGURE_DSCP_MAP;
+			goto fail4;
+		}
+	}
+
+	kfree(acl_rule);
+	kfree(orig_cosmap);
+
+	*id = uid;
+
+	return NSS_CAPWAPMGR_SUCCESS;
+
+fail4:
+	fail_dscp = i;
+	for (i = 0; i < fail_dscp; i++) {
+		if ((i & dscp_mask) != dscp_value) {
+			continue;
+		}
+
+		nss_capwapmgr_trace("dscpmap: resetting to priority %u for dscp %u\n", orig_cosmap[i].internal_pri, i);
+		rv = fal_qos_cosmap_dscp_set(dev_id, group_id, i, &orig_cosmap[i]);
+		if (rv != SW_OK) {
+			nss_capwapmgr_warn("Failed to reset cosmap for dscp %d - code: %d\n", i, rv);
+		}
+	}
+
+fail3:
+	rv = fal_acl_rule_delete(dev_id, list_id, v6_rule_id, rule_nr);
+	if (rv != SW_OK) {
+		nss_capwapmgr_warn("Failed to del ACL v6_rule %d from list %d - code: %d\n", v6_rule_id, list_id, rv);
+	}
+
+fail2:
+	rv = fal_acl_rule_delete(dev_id, list_id, v4_rule_id, rule_nr);
+	if (rv != SW_OK) {
+		nss_capwapmgr_warn("Failed to del ACL v4_rule %d from list %d - code: %d\n", v4_rule_id, list_id, rv);
+	}
+
+fail1:
+	kfree(orig_cosmap);
+	kfree(acl_rule);
+
+	return err;
+}
+EXPORT_SYMBOL(nss_capwapmgr_dscp_rule_create);
+
+/*
+ * nss_capwapmgr_configure_dtls()
+ *	Enable or disable DTLS for a capwap tunnel
+ */
+nss_capwapmgr_status_t nss_capwapmgr_configure_dtls(struct net_device *dev, uint8_t tunnel_id, uint8_t enable_dtls, struct nss_dtlsmgr_config *in_data)
+{
+	struct nss_capwapmgr_priv *priv;
+	struct nss_capwap_msg capwapmsg_inner, capwapmsg_outer;
+	struct nss_capwapmgr_tunnel *t;
+	struct nss_ipv4_destroy v4;
+	struct nss_ipv6_destroy v6;
+	nss_tx_status_t nss_status = NSS_TX_SUCCESS;
+	nss_capwapmgr_status_t status;
+	uint32_t ip_if_num, dtls_enabled, outer_trustsec_enabled;
+
+	dev_hold(dev);
+	t = nss_capwapmgr_verify_tunnel_param(dev, tunnel_id);
+	if (!t) {
+		nss_capwapmgr_warn("%px: can't find tunnel: %d\n", dev, tunnel_id);
+		dev_put(dev);
+		return NSS_CAPWAPMGR_FAILURE_BAD_PARAM;
+	}
+
+	outer_trustsec_enabled = t->capwap_rule.enabled_features & NSS_CAPWAPMGR_FEATURE_OUTER_TRUSTSEC_ENABLED;
+	dtls_enabled = t->capwap_rule.enabled_features & NSS_CAPWAPMGR_FEATURE_DTLS_ENABLED;
+	if ((enable_dtls && dtls_enabled) || (!enable_dtls && !dtls_enabled)) {
+		nss_capwapmgr_warn("%px: nothing changed for tunnel: %d\n", dev, tunnel_id);
+		dev_put(dev);
+		return NSS_CAPWAPMGR_FAILURE_BAD_PARAM;
+	}
+
+	/*
+	 * We don't allow configuring dtls on tunnel if it's still
+	 * enabled.
+	 */
+	if (t->tunnel_state & NSS_CAPWAPMGR_TUNNEL_STATE_ENABLED) {
+		nss_capwapmgr_warn("%px: tunnel %d is already enabled\n", dev, tunnel_id);
+		dev_put(dev);
+		return NSS_CAPWAPMGR_FAILURE_TUNNEL_ENABLED;
+	}
+
+	/*
+	 * Prepare DTLS configure message
+	 */
+	memset(&capwapmsg_inner, 0, sizeof(struct nss_capwap_msg));
+	nss_capwap_msg_init(&capwapmsg_inner, t->if_num_inner, NSS_CAPWAP_MSG_TYPE_DTLS,
+		sizeof(struct nss_capwap_dtls_msg), nss_capwapmgr_msg_event_receive, dev);
+
+	memset(&capwapmsg_outer, 0, sizeof(struct nss_capwap_msg));
+	nss_capwap_msg_init(&capwapmsg_outer, t->if_num_outer, NSS_CAPWAP_MSG_TYPE_DTLS,
+		sizeof(struct nss_capwap_dtls_msg), nss_capwapmgr_msg_event_receive, dev);
+
+	if (!enable_dtls) {
+		nss_capwapmgr_info("%px disabling DTLS for tunnel: %d\n", dev, tunnel_id);
+
+		ip_if_num = nss_capwap_ifnum_with_core_id(t->if_num_outer);
+		capwapmsg_inner.msg.dtls.enable = 0;
+		capwapmsg_inner.msg.dtls.dtls_inner_if_num = t->capwap_rule.dtls_inner_if_num;
+		capwapmsg_inner.msg.dtls.mtu_adjust = 0;
+
+		capwapmsg_outer.msg.dtls.enable = 0;
+
+		/*
+		 * Unconfigure trustsec tx first
+		 */
+		if (outer_trustsec_enabled) {
+			nss_status = nss_trustsec_tx_unconfigure_sgt(t->capwap_rule.dtls_inner_if_num, t->capwap_rule.outer_sgt_value);
+			if (nss_status != NSS_TX_SUCCESS) {
+				nss_capwapmgr_warn("%px: unconfigure trustsec_tx failed\n", dev);
+				dev_put(dev);
+				return NSS_CAPWAPMGR_FAILURE_UNCONFIGURE_TRUSTSEC_TX;
+			}
+		}
+	} else {
+		nss_capwapmgr_info("%px enabling DTLS for tunnel: %d\n", dev, tunnel_id);
+
+		if (!t->capwap_rule.dtls_inner_if_num) {
+			/*
			 * Create a DTLS node. We only validate that the caller is
			 * providing a DTLS configuration structure; the correctness
			 * of these settings is validated by dtlsmgr.
+			 */
+			if (!in_data) {
+				nss_capwapmgr_info("%px: dtls in_data required to create dtls tunnel\n", dev);
+				dev_put(dev);
+				return NSS_CAPWAPMGR_FAILURE_BAD_PARAM;
+			}
+
+			/*
+			 * We only support the METADATA mode for pure DTLS tunnels; in CAPWAP-DTLS
+			 * the offload will not send the packets starting with Metadata. We need to
+			 * ensure that the user does not configure this mode accidentally.
+			 */
+			in_data->flags &= ~NSS_DTLSMGR_ENCAP_METADATA;
+			in_data->decap.nexthop_ifnum = nss_capwap_ifnum_with_core_id(t->if_num_outer);
+
+			t->dtls_dev = nss_dtlsmgr_session_create(in_data);
+			if (!t->dtls_dev) {
+				nss_capwapmgr_warn("%px: cannot create DTLS session\n", dev);
+				dev_put(dev);
+				return NSS_CAPWAPMGR_FAILURE_DI_ALLOC_FAILED;
+			}
+
+			/* Store the DTLS encap and decap interface numbers */
+			t->capwap_rule.dtls_inner_if_num = nss_dtlsmgr_get_interface(t->dtls_dev,
+										     NSS_DTLSMGR_INTERFACE_TYPE_INNER);
+			t->capwap_rule.mtu_adjust = t->dtls_dev->needed_headroom + t->dtls_dev->needed_tailroom;
+			nss_capwapmgr_info("%px: created dtls node for tunnel: %d if_num: %d mtu_adjust: %d\n",
+					   dev, tunnel_id, t->capwap_rule.dtls_inner_if_num, t->capwap_rule.mtu_adjust);
+		}
+
+		ip_if_num = nss_dtlsmgr_get_interface(t->dtls_dev, NSS_DTLSMGR_INTERFACE_TYPE_OUTER);
+
+		capwapmsg_inner.msg.dtls.enable = 1;
+		capwapmsg_inner.msg.dtls.dtls_inner_if_num = t->capwap_rule.dtls_inner_if_num;
+		capwapmsg_inner.msg.dtls.mtu_adjust = t->capwap_rule.mtu_adjust;
+
+		capwapmsg_outer.msg.dtls.enable = 1;
+
+		/*
+		 * Unconfigure trustsec tx first
+		 */
+		if (outer_trustsec_enabled) {
+			nss_status = nss_trustsec_tx_unconfigure_sgt(t->if_num_outer, t->capwap_rule.outer_sgt_value);
+			if (nss_status != NSS_TX_SUCCESS) {
+				nss_capwapmgr_warn("%px: unconfigure trustsec_tx failed\n", dev);
+				dev_put(dev);
+				return NSS_CAPWAPMGR_FAILURE_UNCONFIGURE_TRUSTSEC_TX;
+			}
+		}
+	}
+
+	/*
+	 * Re-configure trustsec_tx
+	 */
+	if (outer_trustsec_enabled) {
+		nss_status = nss_trustsec_tx_configure_sgt(ip_if_num, t->capwap_rule.gmac_ifnum, t->capwap_rule.outer_sgt_value);
+		if (nss_status != NSS_TX_SUCCESS) {
+			nss_capwapmgr_warn("%px: configure trustsec_tx failed\n", dev);
+			dev_put(dev);
+			return NSS_CAPWAPMGR_FAILURE_CONFIGURE_TRUSTSEC_TX;
+		}
+	}
+
+	priv = netdev_priv(dev);
+
+	/*
+	 * Recreate ipv4/v6 rules with the new interface number
+	 */
+	if (t->capwap_rule.l3_proto == NSS_CAPWAP_TUNNEL_IPV4) {
+		if (t->tunnel_state & NSS_CAPWAPMGR_TUNNEL_STATE_IPRULE_CONFIGURED) {
+			v4.protocol = IPPROTO_UDP;
+			v4.src_ip = t->ip_rule.v4.src_ip;
+			v4.dest_ip = t->ip_rule.v4.dest_ip;
+			v4.src_port = t->ip_rule.v4.src_port;
+			v4.dest_port = t->ip_rule.v4.dest_port;
+			nss_status = nss_capwapmgr_unconfigure_ipv4_rule(&v4);
+			if (nss_status != NSS_TX_SUCCESS) {
+				nss_capwapmgr_warn("%px: unconfigure ipv4 rule failed : %d\n", dev, nss_status);
+				dev_put(dev);
+				return NSS_CAPWAPMGR_FAILURE_IP_DESTROY_RULE;
+			}
+			t->tunnel_state &= ~NSS_CAPWAPMGR_TUNNEL_STATE_IPRULE_CONFIGURED;
+		}
+
+		t->ip_rule.v4.dest_interface_num = ip_if_num;
+		nss_status = nss_capwapmgr_configure_ipv4(&t->ip_rule.v4, 0, 0);
+	} else {
+		if (t->tunnel_state & NSS_CAPWAPMGR_TUNNEL_STATE_IPRULE_CONFIGURED) {
+			if (t->capwap_rule.which_udp == NSS_CAPWAP_TUNNEL_UDP) {
+				v6.protocol = IPPROTO_UDP;
+			} else {
+				v6.protocol = IPPROTO_UDPLITE;
+			}
+
+			v6.src_ip[0] = t->ip_rule.v6.src_ip[0];
+			v6.src_ip[1] = t->ip_rule.v6.src_ip[1];
+			v6.src_ip[2] = t->ip_rule.v6.src_ip[2];
+			v6.src_ip[3] = t->ip_rule.v6.src_ip[3];
+
+			v6.dest_ip[0] = t->ip_rule.v6.dest_ip[0];
+			v6.dest_ip[1] = t->ip_rule.v6.dest_ip[1];
+			v6.dest_ip[2] = t->ip_rule.v6.dest_ip[2];
+			v6.dest_ip[3] = t->ip_rule.v6.dest_ip[3];
+
+			v6.src_port = t->ip_rule.v6.src_port;
+			v6.dest_port = t->ip_rule.v6.dest_port;
+			nss_status = nss_capwapmgr_unconfigure_ipv6_rule(&v6);
+			if (nss_status != NSS_TX_SUCCESS) {
+				nss_capwapmgr_warn("%px: unconfigure ipv6 rule failed : %d\n", dev, nss_status);
+				dev_put(dev);
+				return NSS_CAPWAPMGR_FAILURE_IP_DESTROY_RULE;
+			}
+			t->tunnel_state &= ~NSS_CAPWAPMGR_TUNNEL_STATE_IPRULE_CONFIGURED;
+		}
+		t->ip_rule.v6.dest_interface_num = ip_if_num;
+		nss_status = nss_capwapmgr_configure_ipv6(&t->ip_rule.v6, 0, 0);
+	}
+
+	if (nss_status != NSS_TX_SUCCESS) {
+		nss_capwapmgr_warn("%px: configure ip rule failed : %d\n", dev, nss_status);
+		dev_put(dev);
+		return NSS_CAPWAPMGR_FAILURE_IP_RULE;
+	}
+
+	/*
+	 * Now configure capwap dtls
+	 */
+	t->tunnel_state |= NSS_CAPWAPMGR_TUNNEL_STATE_IPRULE_CONFIGURED;
+	status = nss_capwapmgr_tx_msg_sync(priv->nss_ctx, dev, &capwapmsg_inner);
+	if (status != NSS_CAPWAPMGR_SUCCESS) {
+		nss_capwapmgr_warn("%px: configure DTLS failed for inner node: %d\n", dev, status);
+		dev_put(dev);
+		return status;
+	}
+
+	status = nss_capwapmgr_tx_msg_sync(priv->nss_ctx, dev, &capwapmsg_outer);
+	if (status != NSS_CAPWAPMGR_SUCCESS) {
+		nss_capwapmgr_warn("%px: configure DTLS failed for outer node: %d\n", dev, status);
+		dev_put(dev);
+		return status;
+	}
+
+	if (enable_dtls) {
+		t->capwap_rule.enabled_features |= NSS_CAPWAPMGR_FEATURE_DTLS_ENABLED;
+	} else {
+		t->capwap_rule.enabled_features &= ~NSS_CAPWAPMGR_FEATURE_DTLS_ENABLED;
+	}
+	dev_put(dev);
+	return NSS_CAPWAPMGR_SUCCESS;
+}
+EXPORT_SYMBOL(nss_capwapmgr_configure_dtls);
+
+/*
+ * nss_capwapmgr_verify_dtls_rekey_param()
+ *	Validate the rekey param for a DTLS tunnel and return the DTLS netdevice
+ *
+ * The caller should hold a reference on the net device before calling.
+ */
+static inline struct net_device *nss_capwapmgr_verify_dtls_rekey_param(struct net_device *dev, uint8_t tunnel_id,
+								 struct nss_dtlsmgr_config_update *udata)
+{
+	struct nss_capwapmgr_tunnel *t;
+
+	if (!udata) {
+		nss_capwapmgr_info("%px: dtls session update data required\n", dev);
+		return NULL;
+	}
+
+	t = nss_capwapmgr_verify_tunnel_param(dev, tunnel_id);
+	if (!t) {
+		nss_capwapmgr_warn("%px: can't find tunnel: %d\n", dev, tunnel_id);
+		return NULL;
+	}
+
+	if (!(t->capwap_rule.enabled_features & NSS_CAPWAPMGR_FEATURE_DTLS_ENABLED)) {
+		nss_capwapmgr_warn("%px: tunnel does not enable DTLS: %d\n", dev, tunnel_id);
+		return NULL;
+	}
+
+	return t->dtls_dev;
+}
+
+/*
+ * nss_capwapmgr_dtls_rekey_rx_cipher_update()
+ *	Update the rx cipher key for a DTLS tunnel
+ */
+nss_capwapmgr_status_t nss_capwapmgr_dtls_rekey_rx_cipher_update(struct net_device *dev, uint8_t tunnel_id,
+								 struct nss_dtlsmgr_config_update *udata)
+{
+	struct net_device *dtls_ndev;
+
+	dev_hold(dev);
+	dtls_ndev = nss_capwapmgr_verify_dtls_rekey_param(dev, tunnel_id, udata);
+	dev_put(dev);
+
+	if (!dtls_ndev) {
+		goto fail;
+	}
+
+	/*
+	 * Calling dtlsmgr for rekey
+	 */
+	if (nss_dtlsmgr_session_update_decap(dtls_ndev, udata) != NSS_DTLSMGR_OK) {
+		goto fail;
+	}
+	return NSS_CAPWAPMGR_SUCCESS;
+
+fail:
+	nss_capwapmgr_warn("%px: tunnel: %d rekey rx cipher update failed\n", dtls_ndev, tunnel_id);
+	return NSS_CAPWAPMGR_FAILURE_INVALID_DTLS_CFG;
+}
+EXPORT_SYMBOL(nss_capwapmgr_dtls_rekey_rx_cipher_update);
+
+/*
+ * nss_capwapmgr_dtls_rekey_tx_cipher_update()
+ *	Update the tx cipher key for a DTLS tunnel
+ */
+nss_capwapmgr_status_t nss_capwapmgr_dtls_rekey_tx_cipher_update(struct net_device *dev, uint8_t tunnel_id,
+								 struct nss_dtlsmgr_config_update *udata)
+{
+	struct net_device *dtls_ndev;
+
+	dev_hold(dev);
+	dtls_ndev = nss_capwapmgr_verify_dtls_rekey_param(dev, tunnel_id, udata);
+	dev_put(dev);
+
+	if (!dtls_ndev) {
+		goto fail;
+	}
+
+	/*
+	 * Calling dtlsmgr for rekey
+	 */
+	if (nss_dtlsmgr_session_update_encap(dtls_ndev, udata) != NSS_DTLSMGR_OK) {
+		goto fail;
+	}
+	return NSS_CAPWAPMGR_SUCCESS;
+
+fail:
+	nss_capwapmgr_warn("%px: tunnel: %d rekey rx cipher update failed\n", dtls_ndev, tunnel_id);
+	return NSS_CAPWAPMGR_FAILURE_INVALID_DTLS_CFG;
+}
+EXPORT_SYMBOL(nss_capwapmgr_dtls_rekey_tx_cipher_update);
+
+/*
+ * nss_capwapmgr_dtls_rekey_rx_cipher_switch()
+ *	Switch the rx cipher key for a DTLS tunnel
+ */
+nss_capwapmgr_status_t nss_capwapmgr_dtls_rekey_rx_cipher_switch(struct net_device *dev, uint8_t tunnel_id)
+{
+	struct nss_capwapmgr_tunnel *t;
+	nss_capwapmgr_status_t status = NSS_CAPWAPMGR_SUCCESS;
+
+	dev_hold(dev);
+	t = nss_capwapmgr_verify_tunnel_param(dev, tunnel_id);
+	if (!t) {
+		nss_capwapmgr_warn("%px: can't find tunnel: %d\n", dev, tunnel_id);
+		status = NSS_CAPWAPMGR_FAILURE_BAD_PARAM;
+		goto done;
+	}
+
+	if (!(t->capwap_rule.enabled_features & NSS_CAPWAPMGR_FEATURE_DTLS_ENABLED)) {
+		nss_capwapmgr_warn("%px: tunnel does not enable DTLS: %d\n", dev, tunnel_id);
+		status = NSS_CAPWAPMGR_FAILURE_BAD_PARAM;
+		goto done;
+	}
+
+	/*
+	 * Calling dtlsmgr for rekey switch
+	 */
+	if (!nss_dtlsmgr_session_switch_decap(t->dtls_dev)) {
+		nss_capwapmgr_warn("%px: tunnel: %d rekey rx cipher switch failed\n", t->dtls_dev, tunnel_id);
+		status = NSS_CAPWAPMGR_FAILURE_INVALID_DTLS_CFG;
+	}
+
+done:
+	dev_put(dev);
+	return status;
+}
+EXPORT_SYMBOL(nss_capwapmgr_dtls_rekey_rx_cipher_switch);
+
+/*
+ * nss_capwapmgr_dtls_rekey_tx_cipher_switch()
+ *	Switch the tx cipher key for a DTLS tunnel
+ */
+nss_capwapmgr_status_t nss_capwapmgr_dtls_rekey_tx_cipher_switch(struct net_device *dev, uint8_t tunnel_id)
+{
+	struct nss_capwapmgr_tunnel *t;
+	nss_capwapmgr_status_t status = NSS_CAPWAPMGR_SUCCESS;
+
+	dev_hold(dev);
+	t = nss_capwapmgr_verify_tunnel_param(dev, tunnel_id);
+	if (!t) {
+		nss_capwapmgr_warn("%px: can't find tunnel: %d\n", dev, tunnel_id);
+		status = NSS_CAPWAPMGR_FAILURE_BAD_PARAM;
+		goto done;
+	}
+
+	if (!(t->capwap_rule.enabled_features & NSS_CAPWAPMGR_FEATURE_DTLS_ENABLED)) {
+		nss_capwapmgr_warn("%px: tunnel does not enable DTLS: %d\n", dev, tunnel_id);
+		status = NSS_CAPWAPMGR_FAILURE_BAD_PARAM;
+		goto done;
+	}
+
+	/*
+	 * Calling dtlsmgr for rekey switch
+	 */
+	if (!nss_dtlsmgr_session_switch_encap(t->dtls_dev)) {
+		nss_capwapmgr_warn("%px: tunnel: %d rekey tx cipher switch failed\n", t->dtls_dev, tunnel_id);
+		status = NSS_CAPWAPMGR_FAILURE_INVALID_DTLS_CFG;
+	}
+
+done:
+	dev_put(dev);
+	return status;
+}
+EXPORT_SYMBOL(nss_capwapmgr_dtls_rekey_tx_cipher_switch);
+
+/*
+ * nss_capwapmgr_change_version()
+ *	Change CAPWAP version
+ */
+nss_capwapmgr_status_t nss_capwapmgr_change_version(struct net_device *dev, uint8_t tunnel_id, uint8_t ver)
+{
+	struct nss_capwapmgr_priv *priv;
+	struct nss_capwap_msg capwapmsg;
+	struct nss_capwapmgr_tunnel *t;
+	nss_capwapmgr_status_t status = NSS_CAPWAPMGR_SUCCESS;
+
+	dev_hold(dev);
+	t = nss_capwapmgr_verify_tunnel_param(dev, tunnel_id);
+	if (!t) {
+		nss_capwapmgr_warn("%px: can't find tunnel: %d\n", dev, tunnel_id);
+		status = NSS_CAPWAPMGR_FAILURE_BAD_PARAM;
+		goto done;
+	}
+
+	if (ver > NSS_CAPWAP_VERSION_V2) {
+		nss_capwapmgr_warn("%px: un-supported Version: %d\n", dev, ver);
+		status = NSS_CAPWAPMGR_FAILURE_BAD_PARAM;
+		goto done;
+	}
+
+	priv = netdev_priv(dev);
+
+	/*
+	 * Prepare the tunnel configuration parameter to send to NSS FW
+	 */
+	memset(&capwapmsg, 0, sizeof(struct nss_capwap_msg));
+
+	/*
+	 * Send CAPWAP data tunnel command to NSS
+	 */
+	nss_capwap_msg_init(&capwapmsg, t->if_num_inner, NSS_CAPWAP_MSG_TYPE_VERSION,
+		sizeof(struct nss_capwap_version_msg), nss_capwapmgr_msg_event_receive, dev);
+	capwapmsg.msg.version.version = ver;
+	status = nss_capwapmgr_tx_msg_sync(priv->nss_ctx, dev, &capwapmsg);
+	if (status != NSS_CAPWAPMGR_SUCCESS) {
+		nss_capwapmgr_warn("%px: Update Path MTU Tunnel error : %d \n", dev, status);
+	}
+
+done:
+	dev_put(dev);
+	return status;
+}
+EXPORT_SYMBOL(nss_capwapmgr_change_version);
+
+/*
+ * nss_capwapmgr_enable_tunnel()
+ *	API for enabling a data tunnel
+ */
+nss_capwapmgr_status_t nss_capwapmgr_enable_tunnel(struct net_device *dev, uint8_t tunnel_id)
+{
+	struct nss_capwapmgr_priv *priv;
+	struct nss_capwapmgr_tunnel *t;
+	nss_capwapmgr_status_t status = NSS_CAPWAPMGR_SUCCESS;
+
+	dev_hold(dev);
+	t = nss_capwapmgr_verify_tunnel_param(dev, tunnel_id);
+	if (!t) {
+		nss_capwapmgr_warn("%px: can't find tunnel: %d\n", dev, tunnel_id);
+		status = NSS_CAPWAPMGR_FAILURE_BAD_PARAM;
+		goto done;
+	}
+
+	if (t->tunnel_state & NSS_CAPWAPMGR_TUNNEL_STATE_ENABLED) {
+		nss_capwapmgr_warn("%px: tunnel %d is already enabled\n", dev, tunnel_id);
+		status = NSS_CAPWAPMGR_FAILURE_TUNNEL_ENABLED;
+		goto done;
+	}
+
+	priv = netdev_priv(dev);
+	nss_capwapmgr_info("%px: Inner:%d Outer:%d. Tunnel enable is being called\n", dev, t->if_num_inner, t->if_num_outer);
+
+	status = nss_capwapmgr_tx_msg_enable_tunnel(priv->nss_ctx, dev, t->if_num_inner, t->if_num_outer);
+	if (status != NSS_CAPWAPMGR_SUCCESS) {
+		goto done;
+	}
+
+	status = nss_capwapmgr_tx_msg_enable_tunnel(priv->nss_ctx, dev, t->if_num_outer, t->if_num_inner);
+	if (status != NSS_CAPWAPMGR_SUCCESS) {
+		nss_capwapmgr_tunnel_action(priv->nss_ctx, dev, t->if_num_inner, NSS_CAPWAP_MSG_TYPE_DISABLE_TUNNEL);
+		goto done;
+	}
+
+	t->tunnel_state |= NSS_CAPWAPMGR_TUNNEL_STATE_ENABLED;
+
+done:
+	dev_put(dev);
+	return status;
+}
+EXPORT_SYMBOL(nss_capwapmgr_enable_tunnel);
+
+/*
+ * nss_capwapmgr_disable_tunnel()
+ *	API for disabling a data tunnel
+ */
+nss_capwapmgr_status_t nss_capwapmgr_disable_tunnel(struct net_device *dev, uint8_t tunnel_id)
+{
+	struct nss_capwapmgr_priv *priv;
+	struct nss_capwapmgr_tunnel *t;
+	nss_capwapmgr_status_t status = NSS_CAPWAPMGR_SUCCESS;
+
+	dev_hold(dev);
+	t = nss_capwapmgr_verify_tunnel_param(dev, tunnel_id);
+	if (!t) {
+		nss_capwapmgr_warn("%px: can't find tunnel: %d\n", dev, tunnel_id);
+		status = NSS_CAPWAPMGR_FAILURE_BAD_PARAM;
+		goto done;
+	}
+
+	if (!(t->tunnel_state & NSS_CAPWAPMGR_TUNNEL_STATE_ENABLED)) {
+		nss_capwapmgr_warn("%px: tunnel %d is already disabled\n", dev, tunnel_id);
+		status = NSS_CAPWAPMGR_FAILURE_TUNNEL_DISABLED;
+		goto done;
+	}
+
+	priv = netdev_priv(dev);
+	nss_capwapmgr_info("%px: Inner:%d Outer:%d. Tunnel disable is being called\n", dev, t->if_num_inner, t->if_num_outer);
+
+	status = nss_capwapmgr_tunnel_action(priv->nss_ctx, dev, t->if_num_inner, NSS_CAPWAP_MSG_TYPE_DISABLE_TUNNEL);
+	if (status != NSS_CAPWAPMGR_SUCCESS) {
+		status = NSS_CAPWAPMGR_FAILURE_TUNNEL_DISABLED;
+		nss_capwapmgr_warn("%px: tunnel %d disable failed\n", dev, tunnel_id);
+		goto done;
+	}
+
+	status = nss_capwapmgr_tunnel_action(priv->nss_ctx, dev, t->if_num_outer, NSS_CAPWAP_MSG_TYPE_DISABLE_TUNNEL);
+	if (status != NSS_CAPWAPMGR_SUCCESS) {
+		nss_capwapmgr_warn("%px: tunnel %d disable failed\n", dev, tunnel_id);
+		nss_capwapmgr_tx_msg_enable_tunnel(priv->nss_ctx, dev, t->if_num_inner, t->if_num_outer);
+		goto done;
+	}
+
+	t->tunnel_state &= ~NSS_CAPWAPMGR_TUNNEL_STATE_ENABLED;
+
+done:
+	dev_put(dev);
+	return status;
+}
+EXPORT_SYMBOL(nss_capwapmgr_disable_tunnel);
+
+/*
+ * nss_capwapmgr_tunnel_create_common()
+ *	Common handling for creating IPv4 or IPv6 tunnel
+ */
+static nss_capwapmgr_status_t nss_capwapmgr_tunnel_create_common(struct net_device *dev, uint8_t tunnel_id,
+	struct nss_ipv4_create *v4, struct nss_ipv6_create *v6, struct nss_capwap_rule_msg *capwap_rule, struct nss_dtlsmgr_config *in_data)
+{
+	struct nss_capwapmgr_priv *priv;
+	struct nss_capwapmgr_tunnel *t;
+	nss_capwapmgr_status_t status = NSS_CAPWAPMGR_SUCCESS;
+	int32_t capwap_if_num_inner, capwap_if_num_outer, forward_if_num;
+	uint16_t type_flags = 0;
+	nss_tx_status_t nss_status = NSS_TX_SUCCESS;
+	uint32_t dtls_enabled = capwap_rule->enabled_features & NSS_CAPWAPMGR_FEATURE_DTLS_ENABLED;
+	uint32_t outer_trustsec_enabled = capwap_rule->enabled_features & NSS_CAPWAPMGR_FEATURE_OUTER_TRUSTSEC_ENABLED;
+
+	if (!v4 && !v6) {
+		nss_capwapmgr_warn("%px: invalid ip create rule for tunnel: %d\n", dev, tunnel_id);
+		return NSS_CAPWAPMGR_FAILURE_BAD_PARAM;
+	}
+
+	if (tunnel_id >= NSS_CAPWAPMGR_MAX_TUNNELS) {
+		nss_capwapmgr_warn("%px: invalid tunnel_id: %d, must be less than NSS_CAPWAPMGR_MAX_TUNNELS\n", dev, tunnel_id);
+		return NSS_CAPWAPMGR_FAILURE_BAD_PARAM;
+	}
+
+	if (!(capwap_rule->l3_proto == NSS_CAPWAP_TUNNEL_IPV4 ||
+		capwap_rule->l3_proto == NSS_CAPWAP_TUNNEL_IPV6)) {
+		nss_capwapmgr_warn("%px: tunnel %d: wrong argument for l3_proto\n", dev, tunnel_id);
+		return NSS_CAPWAPMGR_FAILURE_BAD_PARAM;
+	}
+
+	if (!(capwap_rule->which_udp == NSS_CAPWAP_TUNNEL_UDP ||
+		capwap_rule->which_udp == NSS_CAPWAP_TUNNEL_UDPLite)) {
+		nss_capwapmgr_warn("%px: tunnel %d: wrong argument for which_udp\n", dev, tunnel_id);
+		return NSS_CAPWAPMGR_FAILURE_BAD_PARAM;
+	}
+
+	if (dtls_enabled && !in_data) {
+		nss_capwapmgr_warn("%px: need to supply in_data if DTLS is enabled\n", dev);
+		return NSS_CAPWAPMGR_FAILURE_BAD_PARAM;
+	}
+
+	dev_hold(dev);
+	t = nss_capwapmgr_verify_tunnel_param(dev, tunnel_id);
+	if (t) {
+		nss_capwapmgr_warn("%px: tunnel: %d already created\n", dev, tunnel_id);
+		status = NSS_CAPWAPMGR_FAILURE_TUNNEL_EXISTS;
+		goto done;
+	}
+
+	capwap_if_num_inner = nss_dynamic_interface_alloc_node(NSS_DYNAMIC_INTERFACE_TYPE_CAPWAP_HOST_INNER);
+	if (capwap_if_num_inner < 0) {
+		nss_capwapmgr_warn("%px: di returned error : %d\n", dev, capwap_if_num_inner);
+		status = NSS_CAPWAPMGR_FAILURE_DI_ALLOC_FAILED;
+		goto done;
+	}
+
+	if (nss_capwapmgr_register_with_nss(capwap_if_num_inner, dev) != NSS_CAPWAPMGR_SUCCESS) {
+		nss_capwapmgr_warn("%d: NSS CAPWAP register with NSS failed", capwap_if_num_inner);
+		status = NSS_CAPWAPMGR_FAILURE_REGISTER_NSS;
+		goto fail1;
+	}
+
+	capwap_if_num_outer = nss_dynamic_interface_alloc_node(NSS_DYNAMIC_INTERFACE_TYPE_CAPWAP_OUTER);
+	if (capwap_if_num_outer < 0) {
+		nss_capwapmgr_warn("%px: di returned error : %d\n", dev, capwap_if_num_outer);
+		status = NSS_CAPWAPMGR_FAILURE_DI_ALLOC_FAILED;
+		goto fail2;
+	}
+
+	if (nss_capwapmgr_register_with_nss(capwap_if_num_outer, dev) != NSS_CAPWAPMGR_SUCCESS) {
+		nss_capwapmgr_warn("%d: NSS CAPWAP register with NSS failed", capwap_if_num_outer);
+		status = NSS_CAPWAPMGR_FAILURE_REGISTER_NSS;
+		goto fail3;
+	}
+
+	/*
+	 * Resolve the tunnel context now; t is NULL at this point (the tunnel
+	 * does not exist yet) and the DTLS path below stores state in
+	 * t->dtls_dev.
+	 */
+	priv = netdev_priv(dev);
+	t = &priv->tunnel[tunnel_id];
+
+	if (!dtls_enabled) {
+		capwap_rule->mtu_adjust = 0;
+		capwap_rule->dtls_inner_if_num = 0;
+		forward_if_num = nss_capwap_ifnum_with_core_id(capwap_if_num_outer);
+	} else {
+		/*
+		 * We only support the METADATA mode for pure DTLS tunnels; in
+		 * CAPWAP-DTLS the offload does not send packets that begin with
+		 * metadata, so make sure the user cannot configure this mode by
+		 * accident.
+		 */
+		in_data->flags &= ~NSS_DTLSMGR_ENCAP_METADATA;
+		in_data->decap.nexthop_ifnum = nss_capwap_ifnum_with_core_id(capwap_if_num_outer);
+
+		t->dtls_dev = nss_dtlsmgr_session_create(in_data);
+		if (!t->dtls_dev) {
+			nss_capwapmgr_warn("%px: NSS DTLS node alloc failed\n", dev);
+			status = NSS_CAPWAPMGR_FAILURE_DI_ALLOC_FAILED;
+			goto fail4;
+		}
+		capwap_rule->dtls_inner_if_num = nss_dtlsmgr_get_interface(t->dtls_dev, NSS_DTLSMGR_INTERFACE_TYPE_INNER);
+		forward_if_num = nss_dtlsmgr_get_interface(t->dtls_dev, NSS_DTLSMGR_INTERFACE_TYPE_OUTER);
+		capwap_rule->mtu_adjust = t->dtls_dev->needed_headroom + t->dtls_dev->needed_tailroom;
+	}
+
+	if (outer_trustsec_enabled) {
+		nss_capwapmgr_info("%px: configure TrustsecTx with sgt value: %x\n", dev, capwap_rule->outer_sgt_value);
+		if (v4) {
+			capwap_rule->gmac_ifnum = v4->src_interface_num;
+			nss_status = nss_trustsec_tx_configure_sgt(forward_if_num, v4->src_interface_num, capwap_rule->outer_sgt_value);
+			v4->src_interface_num = NSS_TRUSTSEC_TX_INTERFACE;
+		} else {
+			capwap_rule->gmac_ifnum = v6->src_interface_num;
+			nss_status = nss_trustsec_tx_configure_sgt(forward_if_num, v6->src_interface_num, capwap_rule->outer_sgt_value);
+			v6->src_interface_num = NSS_TRUSTSEC_TX_INTERFACE;
+		}
+		if (nss_status != NSS_TX_SUCCESS) {
+			nss_capwapmgr_warn("%px: configure trustsectx node failed\n", dev);
+			status = NSS_CAPWAPMGR_FAILURE_CONFIGURE_TRUSTSEC_TX;
+			goto fail5;
+		}
+	}
+
+	/*
+	 * We use type_flags to determine the correct header sizes
+	 * for a frame when encapsulating. The CAPWAP processing node in the
+	 * NSS FW does not know anything about IP rule information.
+	 */
+	if (v4) {
+		if ((v4->out_vlan_tag[0] & 0xFFF) != 0xFFF) {
+			type_flags |= NSS_CAPWAP_RULE_CREATE_VLAN_CONFIGURED;
+		}
+
+		if (v4->flow_pppoe_if_exist) {
+			type_flags |= NSS_CAPWAP_RULE_CREATE_PPPOE_CONFIGURED;
+		}
+	} else {
+		if ((v6->out_vlan_tag[0] & 0xFFF) != 0xFFF) {
+			type_flags |= NSS_CAPWAP_RULE_CREATE_VLAN_CONFIGURED;
+		}
+
+		if (v6->flow_pppoe_if_exist) {
+			type_flags |= NSS_CAPWAP_RULE_CREATE_PPPOE_CONFIGURED;
+		}
+	}
+
+	if (capwap_rule->l3_proto == NSS_CAPWAP_TUNNEL_IPV6 && capwap_rule->which_udp == NSS_CAPWAP_TUNNEL_UDPLite) {
+		type_flags |= NSS_CAPWAP_ENCAP_UDPLITE_HDR_CSUM;
+	}
+
+	/*
+	 * Copy over the IP rule information to capwap rule for encap side.
+	 * This avoids confusion because the IP rule direction is AC->AP while the
+	 * capwap encap rule direction is AP->AC.
+	 */
+	if (v4) {
+		capwap_rule->encap.src_port = htonl(v4->dest_port);
+		capwap_rule->encap.src_ip.ip.ipv4 = htonl(v4->dest_ip);
+
+		capwap_rule->encap.dest_port = htonl(v4->src_port);
+		capwap_rule->encap.dest_ip.ip.ipv4 = htonl(v4->src_ip);
+	} else {
+		capwap_rule->encap.src_port = htonl(v6->dest_port);
+		capwap_rule->encap.src_ip.ip.ipv6[0] = htonl(v6->dest_ip[0]);
+		capwap_rule->encap.src_ip.ip.ipv6[1] = htonl(v6->dest_ip[1]);
+		capwap_rule->encap.src_ip.ip.ipv6[2] = htonl(v6->dest_ip[2]);
+		capwap_rule->encap.src_ip.ip.ipv6[3] = htonl(v6->dest_ip[3]);
+
+		capwap_rule->encap.dest_port = htonl(v6->src_port);
+		capwap_rule->encap.dest_ip.ip.ipv6[0] = htonl(v6->src_ip[0]);
+		capwap_rule->encap.dest_ip.ip.ipv6[1] = htonl(v6->src_ip[1]);
+		capwap_rule->encap.dest_ip.ip.ipv6[2] = htonl(v6->src_ip[2]);
+		capwap_rule->encap.dest_ip.ip.ipv6[3] = htonl(v6->src_ip[3]);
+	}
+
+	status = nss_capwapmgr_create_capwap_rule(dev, capwap_if_num_inner, capwap_rule, type_flags);
+	nss_capwapmgr_info("%px: dynamic interface if_num is :%d and capwap tunnel status:%d\n", dev, capwap_if_num_inner, status);
+	if (status != NSS_CAPWAPMGR_SUCCESS) {
+		nss_capwapmgr_warn("%px: %d: CAPWAP rule create failed with status: %d", dev, capwap_if_num_inner, status);
+		status = NSS_CAPWAPMGR_FAILURE_CAPWAP_RULE;
+		goto fail5;
+	}
+
+	status = nss_capwapmgr_create_capwap_rule(dev, capwap_if_num_outer, capwap_rule, type_flags);
+	nss_capwapmgr_info("%px: dynamic interface if_num is :%d and capwap tunnel status:%d\n", dev, capwap_if_num_outer, status);
+	if (status != NSS_CAPWAPMGR_SUCCESS) {
+		nss_capwapmgr_warn("%px: %d: CAPWAP rule create failed with status: %d", dev, capwap_if_num_outer, status);
+		status = NSS_CAPWAPMGR_FAILURE_CAPWAP_RULE;
+		goto fail5;
+	}
+
+	if (v4) {
+		v4->dest_interface_num = forward_if_num;
+		nss_status = nss_capwapmgr_configure_ipv4(v4, 0, 0);
+	} else {
+		v6->dest_interface_num = forward_if_num;
+		nss_status = nss_capwapmgr_configure_ipv6(v6, 0, 0);
+	}
+
+	if (nss_status != NSS_TX_SUCCESS) {
+		nss_capwapmgr_warn("%px: %d: IPv4/IPv6 rule create failed with status: %d", dev, forward_if_num, nss_status);
+		status = NSS_CAPWAPMGR_FAILURE_IP_RULE;
+		goto fail5;
+	}
+
+	nss_capwapmgr_info("%px: %d: %d: CAPWAP TUNNEL CREATE DONE tunnel_id:%d (%px)\n", dev, capwap_if_num_inner, capwap_if_num_outer, tunnel_id, t);
+
+	/*
+	 * Keep a copy of rule information.
+	 */
+	if (v4) {
+		memcpy(&t->ip_rule.v4, v4, sizeof (struct nss_ipv4_create));
+	} else {
+		memcpy(&t->ip_rule.v6, v6, sizeof (struct nss_ipv6_create));
+	}
+
+	memcpy(&t->capwap_rule, capwap_rule, sizeof (struct nss_capwap_rule_msg));
+
+	/*
+	 * Make it globally visible inside the netdev.
+	 */
+	t->if_num_inner = capwap_if_num_inner;
+	t->if_num_outer = capwap_if_num_outer;
+	priv->if_num_to_tunnel_id[capwap_if_num_inner] = tunnel_id;
+	priv->if_num_to_tunnel_id[capwap_if_num_outer] = tunnel_id;
+	t->tunnel_state |= NSS_CAPWAPMGR_TUNNEL_STATE_CONFIGURED;
+	t->tunnel_state |= NSS_CAPWAPMGR_TUNNEL_STATE_IPRULE_CONFIGURED;
+	t->type_flags = type_flags;
+
+	goto done;
+
+fail5:
+	if (dtls_enabled) {
+		if (nss_dtlsmgr_session_destroy(t->dtls_dev) != NSS_DTLSMGR_OK) {
+			nss_capwapmgr_warn("%px: failed to destroy DTLS session", t->dtls_dev);
+		}
+	}
+fail4:
+	nss_capwapmgr_unregister_with_nss(capwap_if_num_outer);
+fail3:
+	(void)nss_dynamic_interface_dealloc_node(capwap_if_num_outer, NSS_DYNAMIC_INTERFACE_TYPE_CAPWAP_OUTER);
+fail2:
+	nss_capwapmgr_unregister_with_nss(capwap_if_num_inner);
+fail1:
+	(void)nss_dynamic_interface_dealloc_node(capwap_if_num_inner, NSS_DYNAMIC_INTERFACE_TYPE_CAPWAP_HOST_INNER);
+
+done:
+	dev_put(dev);
+	return status;
+}
+
+/*
+ * nss_capwapmgr_ipv4_tunnel_create()
+ *	API for creating IPv4 and CAPWAP rule.
+ */
+nss_capwapmgr_status_t nss_capwapmgr_ipv4_tunnel_create(struct net_device *dev, uint8_t tunnel_id,
+			struct nss_ipv4_create *ip_rule, struct nss_capwap_rule_msg *capwap_rule, struct nss_dtlsmgr_config *in_data)
+{
+	return nss_capwapmgr_tunnel_create_common(dev, tunnel_id, ip_rule, NULL, capwap_rule, in_data);
+}
+EXPORT_SYMBOL(nss_capwapmgr_ipv4_tunnel_create);
+
+/*
+ * nss_capwapmgr_ipv6_tunnel_create()
+ *	API for creating IPv6 and CAPWAP rule.
+ */
+nss_capwapmgr_status_t nss_capwapmgr_ipv6_tunnel_create(struct net_device *dev, uint8_t tunnel_id,
+			struct nss_ipv6_create *ip_rule, struct nss_capwap_rule_msg *capwap_rule, struct nss_dtlsmgr_config *in_data)
+{
+	return nss_capwapmgr_tunnel_create_common(dev, tunnel_id, NULL, ip_rule, capwap_rule, in_data);
+}
+EXPORT_SYMBOL(nss_capwapmgr_ipv6_tunnel_create);
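+
+/*
+ * Typical tunnel life cycle, as implied by the state checks in this file
+ * (illustrative sketch only; error handling omitted):
+ *
+ *	nss_capwapmgr_ipv4_tunnel_create(dev, tunnel_id, &v4, &rule, NULL);
+ *	nss_capwapmgr_enable_tunnel(dev, tunnel_id);
+ *	...
+ *	nss_capwapmgr_disable_tunnel(dev, tunnel_id);
+ *	nss_capwapmgr_tunnel_destroy(dev, tunnel_id);
+ *
+ * nss_capwapmgr_tunnel_destroy() below refuses to destroy a tunnel that is
+ * still enabled.
+ */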
+
+/*
+ * nss_capwapmgr_tunnel_save_stats()
+ *	Internal function to save tunnel stats when a tunnel is being
+ *	destroyed.
+ */
+static void nss_capwapmgr_tunnel_save_stats(struct nss_capwap_tunnel_stats *save, struct nss_capwap_tunnel_stats *fstats)
+{
+	save->dtls_pkts += fstats->dtls_pkts;
+
+	save->rx_segments += fstats->rx_segments;
+	save->rx_dup_frag += fstats->rx_dup_frag;
+	save->rx_oversize_drops += fstats->rx_oversize_drops;
+	save->rx_frag_timeout_drops += fstats->rx_frag_timeout_drops;
+	save->rx_n2h_drops += fstats->rx_n2h_drops;
+	save->rx_n2h_queue_full_drops += fstats->rx_n2h_queue_full_drops;
+	save->rx_mem_failure_drops += fstats->rx_mem_failure_drops;
+	save->rx_csum_drops += fstats->rx_csum_drops;
+	save->rx_malformed += fstats->rx_malformed;
+	save->rx_frag_gap_drops += fstats->rx_frag_gap_drops;
+
+	save->tx_segments += fstats->tx_segments;
+	save->tx_queue_full_drops += fstats->tx_queue_full_drops;
+	save->tx_mem_failure_drops += fstats->tx_mem_failure_drops;
+	save->tx_dropped_sg_ref += fstats->tx_dropped_sg_ref;
+	save->tx_dropped_ver_mis += fstats->tx_dropped_ver_mis;
+	save->tx_dropped_hroom += fstats->tx_dropped_hroom;
+	save->tx_dropped_dtls += fstats->tx_dropped_dtls;
+	save->tx_dropped_nwireless += fstats->tx_dropped_nwireless;
+
+	/*
+	 * add pnode stats now.
+	 */
+	save->pnode_stats.rx_packets += fstats->pnode_stats.rx_packets;
+	save->pnode_stats.rx_bytes += fstats->pnode_stats.rx_bytes;
+	save->pnode_stats.rx_dropped += fstats->pnode_stats.rx_dropped;
+	save->pnode_stats.tx_packets += fstats->pnode_stats.tx_packets;
+	save->pnode_stats.tx_bytes += fstats->pnode_stats.tx_bytes;
+}
+
+/*
+ * nss_capwapmgr_tunnel_destroy()
+ *	API for destroying a tunnel. The CAPWAP tunnel must first be disabled.
+ */
+nss_capwapmgr_status_t nss_capwapmgr_tunnel_destroy(struct net_device *dev, uint8_t tunnel_id)
+{
+	struct nss_capwap_tunnel_stats stats;
+	struct nss_ipv4_destroy v4;
+	struct nss_ipv6_destroy v6;
+	struct nss_capwapmgr_priv *priv;
+	struct nss_capwapmgr_tunnel *t;
+	nss_tx_status_t nss_status = NSS_TX_SUCCESS;
+	uint32_t if_num_inner, if_num_outer;
+	nss_capwapmgr_status_t status = NSS_CAPWAPMGR_SUCCESS;
+
+	dev_hold(dev);
+	t = nss_capwapmgr_verify_tunnel_param(dev, tunnel_id);
+	if (!t) {
+		nss_capwapmgr_warn("%px: tunnel %d: wrong argument for tunnel destroy\n", dev, tunnel_id);
+		status = NSS_CAPWAPMGR_FAILURE_BAD_PARAM;
+		goto done;
+	}
+
+	if (!(t->tunnel_state & NSS_CAPWAPMGR_TUNNEL_STATE_CONFIGURED)) {
+		nss_capwapmgr_warn("%px: tunnel %d is not configured yet\n", dev, tunnel_id);
+		status = NSS_CAPWAPMGR_FAILURE_TUNNEL_NOT_CFG;
+		goto done;
+	}
+
+	/*
+	 * We don't allow destroy operation on tunnel if it's still enabled.
+	 */
+	if (t->tunnel_state & NSS_CAPWAPMGR_TUNNEL_STATE_ENABLED) {
+		nss_capwapmgr_warn("%px: no destroy alloed for an eanbled tunnel: %d\n", dev, tunnel_id);
+		status = NSS_CAPWAPMGR_FAILURE_TUNNEL_ENABLED;
+		goto done;
+	}
+
+	if (!(t->capwap_rule.l3_proto == NSS_CAPWAP_TUNNEL_IPV4 ||
+		t->capwap_rule.l3_proto == NSS_CAPWAP_TUNNEL_IPV6)) {
+		nss_capwapmgr_warn("%px: tunnel %d: wrong argument for l3_proto\n", dev, tunnel_id);
+		status = NSS_CAPWAPMGR_FAILURE_BAD_PARAM;
+		goto done;
+	}
+
+	if (!(t->capwap_rule.which_udp == NSS_CAPWAP_TUNNEL_UDP ||
+		t->capwap_rule.which_udp == NSS_CAPWAP_TUNNEL_UDPLite)) {
+		nss_capwapmgr_warn("%px: tunnel %d: wrong argument for which_udp(%d)\n", dev, tunnel_id, t->capwap_rule.which_udp);
+		status = NSS_CAPWAPMGR_FAILURE_BAD_PARAM;
+		goto done;
+	}
+
+	priv = netdev_priv(dev);
+	nss_capwapmgr_info("%px: %d: tunnel destroy is being called\n", dev, tunnel_id);
+
+	if_num_inner = t->if_num_inner;
+	if_num_outer = t->if_num_outer;
+
+	if (priv->if_num_to_tunnel_id[if_num_inner] != tunnel_id) {
+		nss_capwapmgr_warn("%px: %d: tunnel_id %d didn't match with tunnel_id :%d\n",
+			dev, if_num_inner, tunnel_id, priv->if_num_to_tunnel_id[if_num_inner]);
+		status = NSS_CAPWAPMGR_FAILURE_BAD_PARAM;
+		goto done;
+	}
+
+	if (priv->if_num_to_tunnel_id[if_num_outer] != tunnel_id) {
+		nss_capwapmgr_warn("%px: %d: tunnel_id %d didn't match with tunnel_id :%d\n",
+			dev, if_num_outer, tunnel_id, priv->if_num_to_tunnel_id[if_num_outer]);
+		status = NSS_CAPWAPMGR_FAILURE_BAD_PARAM;
+		goto done;
+	}
+
+	if (nss_capwap_get_stats(if_num_inner, &stats) == true) {
+		nss_capwapmgr_tunnel_save_stats(&global.tunneld, &stats);
+	}
+
+	if (nss_capwap_get_stats(if_num_outer, &stats) == true) {
+		nss_capwapmgr_tunnel_save_stats(&global.tunneld, &stats);
+	}
+
+	/*
+	 * Destroy IP rule first.
+	 */
+	if (t->tunnel_state & NSS_CAPWAPMGR_TUNNEL_STATE_IPRULE_CONFIGURED) {
+		if (t->capwap_rule.l3_proto == NSS_CAPWAP_TUNNEL_IPV4) {
+			memset(&v4, 0, sizeof (struct nss_ipv4_destroy));
+			v4.protocol = IPPROTO_UDP;
+			v4.src_ip = t->ip_rule.v4.src_ip;
+			v4.dest_ip = t->ip_rule.v4.dest_ip;
+			v4.src_port = t->ip_rule.v4.src_port;
+			v4.dest_port = t->ip_rule.v4.dest_port;
+			nss_status = nss_capwapmgr_unconfigure_ipv4_rule(&v4);
+		} else {
+			memset(&v6, 0, sizeof (struct nss_ipv6_destroy));
+			if (t->capwap_rule.which_udp == NSS_CAPWAP_TUNNEL_UDP) {
+				v6.protocol = IPPROTO_UDP;
+			} else {
+				v6.protocol = IPPROTO_UDPLITE;
+			}
+
+			v6.src_ip[0] = t->ip_rule.v6.src_ip[0];
+			v6.src_ip[1] = t->ip_rule.v6.src_ip[1];
+			v6.src_ip[2] = t->ip_rule.v6.src_ip[2];
+			v6.src_ip[3] = t->ip_rule.v6.src_ip[3];
+
+			v6.dest_ip[0] = t->ip_rule.v6.dest_ip[0];
+			v6.dest_ip[1] = t->ip_rule.v6.dest_ip[1];
+			v6.dest_ip[2] = t->ip_rule.v6.dest_ip[2];
+			v6.dest_ip[3] = t->ip_rule.v6.dest_ip[3];
+
+			v6.src_port = t->ip_rule.v6.src_port;
+			v6.dest_port = t->ip_rule.v6.dest_port;
+			nss_status = nss_capwapmgr_unconfigure_ipv6_rule(&v6);
+		}
+
+		if (nss_status != NSS_TX_SUCCESS) {
+			nss_capwapmgr_warn("%px: Unconfigure IP rule failed for tunnel : %d\n",
+				dev, tunnel_id);
+			status = NSS_CAPWAPMGR_FAILURE_IP_DESTROY_RULE;
+			goto done;
+		}
+		t->tunnel_state &= ~NSS_CAPWAPMGR_TUNNEL_STATE_IPRULE_CONFIGURED;
+	}
+
+	/*
+	 * Destroy CAPWAP rule now.
+	 */
+	status = nss_capwapmgr_tunnel_action(priv->nss_ctx, dev, if_num_outer, NSS_CAPWAP_MSG_TYPE_UNCFG_RULE);
+	if (status != NSS_CAPWAPMGR_SUCCESS) {
+		nss_capwapmgr_warn("%px: %d: Unconfigure CAPWAP rule failed for tunnel : %d\n",
+			dev, if_num_outer, tunnel_id);
+		goto fail;
+	}
+
+	status = nss_capwapmgr_tunnel_action(priv->nss_ctx, dev, if_num_inner, NSS_CAPWAP_MSG_TYPE_UNCFG_RULE);
+	if (status != NSS_CAPWAPMGR_SUCCESS) {
+		nss_capwapmgr_warn("%px: %d: Unconfigure CAPWAP rule failed for tunnel : %d\n",
+			dev, if_num_inner, tunnel_id);
+		status = nss_capwapmgr_create_capwap_rule(dev, if_num_outer, &(t->capwap_rule), t->type_flags);
+		if (status != NSS_CAPWAPMGR_SUCCESS) {
+			nss_capwapmgr_warn("%px: %d: re-creating the CAPWAP rule failed for tunnel : %d\n",
+				dev, if_num_inner, tunnel_id);
+			goto done;
+		}
+
+		goto fail;
+	}
+
+	nss_capwapmgr_unregister_with_nss(if_num_outer);
+	nss_capwapmgr_unregister_with_nss(if_num_inner);
+
+	/*
+	 * Deallocate dynamic interface
+	 */
+	nss_status = nss_dynamic_interface_dealloc_node(if_num_outer, NSS_DYNAMIC_INTERFACE_TYPE_CAPWAP_OUTER);
+	if (nss_status != NSS_TX_SUCCESS) {
+		nss_capwapmgr_warn("%px: %d: Dealloc of dynamic interface failed for tunnel : %d\n",
+			dev, if_num_outer, tunnel_id);
+	}
+
+	nss_status = nss_dynamic_interface_dealloc_node(if_num_inner, NSS_DYNAMIC_INTERFACE_TYPE_CAPWAP_HOST_INNER);
+	if (nss_status != NSS_TX_SUCCESS) {
+		nss_capwapmgr_warn("%px: %d: Dealloc of dynamic interface failed for tunnel : %d\n",
+			dev, if_num_inner, tunnel_id);
+	}
+
+	/*
+	 * Unconfigure Trustsec Tx
+	 */
+	if (t->capwap_rule.enabled_features & NSS_CAPWAPMGR_FEATURE_OUTER_TRUSTSEC_ENABLED) {
+		if (t->capwap_rule.enabled_features & NSS_CAPWAPMGR_FEATURE_DTLS_ENABLED) {
+			nss_status = nss_trustsec_tx_unconfigure_sgt(t->capwap_rule.dtls_inner_if_num, t->capwap_rule.outer_sgt_value);
+		} else {
+			nss_status = nss_trustsec_tx_unconfigure_sgt(t->if_num_outer, t->capwap_rule.outer_sgt_value);
+		}
+
+		if (nss_status != NSS_TX_SUCCESS) {
+			nss_capwapmgr_warn("%px: unconfigure trustsec_tx failed\n", dev);
+		}
+	}
+
+	/*
+	 * Destroy DTLS node if there is one associated to this tunnel
+	 */
+	if (t->capwap_rule.dtls_inner_if_num) {
+		if (nss_dtlsmgr_session_destroy(t->dtls_dev) != NSS_DTLSMGR_OK) {
+			nss_capwapmgr_warn("%px: failed to destroy DTLS session", t->dtls_dev);
+		}
+	}
+
+	t->tunnel_state &= ~NSS_CAPWAPMGR_TUNNEL_STATE_CONFIGURED;
+	priv->if_num_to_tunnel_id[if_num_inner] = -1;
+	priv->if_num_to_tunnel_id[if_num_outer] = -1;
+
+	memset(t, 0, sizeof(struct nss_capwapmgr_tunnel));
+
+	t->if_num_inner = -1;
+	t->if_num_outer = -1;
+
+	nss_capwapmgr_info("%px: Tunnel %d is completely destroyed\n", dev , tunnel_id);
+	status = NSS_CAPWAPMGR_SUCCESS;
+	goto done;
+
+fail:
+	if (t->capwap_rule.l3_proto == NSS_CAPWAP_TUNNEL_IPV4) {
+		nss_status = nss_capwapmgr_configure_ipv4(&t->ip_rule.v4, 0, 0);
+	} else {
+		nss_status = nss_capwapmgr_configure_ipv6(&t->ip_rule.v6, 0, 0);
+	}
+
+	if (nss_status == NSS_TX_SUCCESS) {
+		t->tunnel_state |= NSS_CAPWAPMGR_TUNNEL_STATE_IPRULE_CONFIGURED;
+	}
+
+	status = NSS_CAPWAPMGR_FAILURE_CAPWAP_DESTROY_RULE;
+
+done:
+	dev_put(dev);
+	return status;
+}
+EXPORT_SYMBOL(nss_capwapmgr_tunnel_destroy);
+
+/*
+ * nss_capwapmgr_flow_rule_action()
+ */
+static inline nss_capwapmgr_status_t nss_capwapmgr_flow_rule_action(struct net_device *dev, uint8_t tunnel_id,
+						nss_capwap_msg_type_t cmd, uint16_t ip_version,
+						uint16_t protocol, uint32_t *src_ip, uint32_t *dst_ip,
+						uint16_t src_port, uint16_t dst_port, uint32_t flow_id)
+{
+	struct nss_capwapmgr_priv *priv;
+	struct nss_capwap_msg capwapmsg;
+	struct nss_capwap_flow_rule_msg *ncfrm;
+	struct nss_capwapmgr_tunnel *t;
+	nss_capwapmgr_status_t status;
+
+	dev_hold(dev);
+	t = nss_capwapmgr_verify_tunnel_param(dev, tunnel_id);
+	if (!t) {
+		nss_capwapmgr_warn("%px: can't find tunnel: %d\n", dev, tunnel_id);
+		status = NSS_CAPWAPMGR_FAILURE_BAD_PARAM;
+		goto done;
+	}
+
+	priv = netdev_priv(dev);
+
+	memset(&capwapmsg, 0, sizeof(struct nss_capwap_msg));
+	nss_capwap_msg_init(&capwapmsg, t->if_num_outer, cmd,
+		sizeof(struct nss_capwap_flow_rule_msg), nss_capwapmgr_msg_event_receive, dev);
+
+	/*
+	 * Set flow rule message
+	 */
+	if (cmd == NSS_CAPWAP_MSG_TYPE_FLOW_RULE_ADD) {
+		ncfrm = &capwapmsg.msg.flow_rule_add;
+	} else {
+		ncfrm = &capwapmsg.msg.flow_rule_del;
+	}
+	ncfrm->protocol = protocol;
+	ncfrm->src_port = src_port;
+	ncfrm->dst_port = dst_port;
+	ncfrm->ip_version = ip_version;
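+
+	/*
+	 * Note: sizeof(struct in6_addr) bytes are always copied below, so the
+	 * src_ip and dst_ip arguments must point at IPv6-sized (4-word)
+	 * buffers even when ip_version indicates IPv4.
+	 */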
+	memcpy(ncfrm->src_ip, src_ip, sizeof(struct in6_addr));
+	memcpy(ncfrm->dst_ip, dst_ip, sizeof(struct in6_addr));
+	ncfrm->flow_id = flow_id;
+
+	/*
+	 * Send flow rule message to NSS core
+	 */
+	status = nss_capwapmgr_tx_msg_sync(priv->nss_ctx, dev, &capwapmsg);
+	if (status != NSS_CAPWAPMGR_SUCCESS) {
+		nss_capwapmgr_warn("%px: send flow rule message failed with error: %d\n", dev, status);
+	}
+
+done:
+	dev_put(dev);
+	return status;
+}
+
+/*
+ * nss_capwapmgr_add_flow_rule()
+ *	Send a capwap flow rule add message to NSS core.
+ */
+nss_capwapmgr_status_t nss_capwapmgr_add_flow_rule(struct net_device *dev, uint8_t tunnel_id, uint16_t ip_version,
+						uint16_t protocol, uint32_t *src_ip, uint32_t *dst_ip,
+						uint16_t src_port, uint16_t dst_port, uint32_t flow_id)
+{
+	return nss_capwapmgr_flow_rule_action(dev, tunnel_id, NSS_CAPWAP_MSG_TYPE_FLOW_RULE_ADD, ip_version,
+											protocol, src_ip, dst_ip, src_port, dst_port, flow_id);
+}
+EXPORT_SYMBOL(nss_capwapmgr_add_flow_rule);
+
+/*
+ * nss_capwapmgr_del_flow_rule()
+ *	Send a capwap flow rule del message to NSS core.
+ */
+nss_capwapmgr_status_t nss_capwapmgr_del_flow_rule(struct net_device *dev, uint8_t tunnel_id, uint16_t ip_version,
+						uint16_t protocol, uint32_t *src_ip, uint32_t *dst_ip,
+						uint16_t src_port, uint16_t dst_port)
+{
+	return nss_capwapmgr_flow_rule_action(dev, tunnel_id, NSS_CAPWAP_MSG_TYPE_FLOW_RULE_DEL, ip_version,
+											protocol, src_ip, dst_ip, src_port, dst_port, 0);
+}
+EXPORT_SYMBOL(nss_capwapmgr_del_flow_rule);
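+
+/*
+ * Note: add and delete share the handler above; a delete is expected to carry
+ * the same 5-tuple (ip_version, protocol, addresses and ports) that was used
+ * in the corresponding add. flow_id is only meaningful on add and is sent as
+ * 0 on delete.
+ */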
+
+/*
+ * nss_capwapmgr_tunnel_stats()
+ *	Gets tunnel stats from netdev
+ */
+nss_capwapmgr_status_t nss_capwapmgr_tunnel_stats(struct net_device *dev,
+		uint8_t tunnel_id, struct nss_capwap_tunnel_stats *stats)
+{
+	struct nss_capwapmgr_tunnel *t;
+	struct nss_capwap_tunnel_stats stats_temp;
+	nss_capwapmgr_status_t status = NSS_CAPWAPMGR_SUCCESS;
+
+	if (!stats) {
+		nss_capwapmgr_warn("%px: invalid rtnl structure\n", dev);
+		return NSS_CAPWAPMGR_FAILURE_BAD_PARAM;
+	}
+
+	dev_hold(dev);
+	t = nss_capwapmgr_verify_tunnel_param(dev, tunnel_id);
+	if (!t) {
+		nss_capwapmgr_trace("%px: can't find tunnel: %d\n", dev, tunnel_id);
+		status = NSS_CAPWAPMGR_FAILURE_BAD_PARAM;
+		goto done;
+	}
+
+	if (!(t->tunnel_state & NSS_CAPWAPMGR_TUNNEL_STATE_CONFIGURED)) {
+		nss_capwapmgr_trace("%px: tunnel: %d not configured yet\n", dev, tunnel_id);
+		status = NSS_CAPWAPMGR_FAILURE_TUNNEL_NOT_CFG;
+		goto done;
+	}
+
+	/*
+	 * Copy the inner interface stats.
+	 */
+	if (nss_capwap_get_stats(t->if_num_inner, &stats_temp) == false) {
+		nss_capwapmgr_warn("%px: tunnel %d not ready yet\n", dev, tunnel_id);
+		status = NSS_CAPWAPMGR_FAILURE_NOT_READY;
+		goto done;
+	}
+
+	stats->dtls_pkts += stats_temp.dtls_pkts;
+	stats->tx_segments += stats_temp.tx_segments;
+	stats->tx_queue_full_drops += stats_temp.tx_queue_full_drops;
+	stats->tx_mem_failure_drops += stats_temp.tx_mem_failure_drops;
+	stats->tx_dropped_sg_ref += stats_temp.tx_dropped_sg_ref;
+	stats->tx_dropped_ver_mis += stats_temp.tx_dropped_ver_mis;
+	stats->tx_dropped_hroom += stats_temp.tx_dropped_hroom;
+	stats->tx_dropped_dtls += stats_temp.tx_dropped_dtls;
+	stats->tx_dropped_nwireless += stats_temp.tx_dropped_nwireless;
+
+	/*
+	 * Pnode tx stats for Inner node.
+	 */
+	stats->pnode_stats.tx_packets += stats_temp.pnode_stats.tx_packets;
+	stats->pnode_stats.tx_bytes += stats_temp.pnode_stats.tx_bytes;
+	stats->tx_dropped_inner += stats_temp.tx_dropped_inner;
+
+	/*
+	 * Copy the outer interface stats.
+	 */
+	if (nss_capwap_get_stats(t->if_num_outer, &stats_temp) == false) {
+		nss_capwapmgr_warn("%px: tunnel %d not ready yet\n", dev, tunnel_id);
+		status = NSS_CAPWAPMGR_FAILURE_NOT_READY;
+		goto done;
+	}
+
+	stats->rx_segments += stats_temp.rx_segments;
+	stats->dtls_pkts += stats_temp.dtls_pkts;
+	stats->rx_dup_frag += stats_temp.rx_dup_frag;
+	stats->rx_oversize_drops += stats_temp.rx_oversize_drops;
+	stats->rx_frag_timeout_drops += stats_temp.rx_frag_timeout_drops;
+	stats->rx_n2h_drops += stats_temp.rx_n2h_drops;
+	stats->rx_n2h_queue_full_drops += stats_temp.rx_n2h_queue_full_drops;
+	stats->rx_mem_failure_drops += stats_temp.rx_mem_failure_drops;
+	stats->rx_csum_drops += stats_temp.rx_csum_drops;
+	stats->rx_malformed += stats_temp.rx_malformed;
+	stats->rx_frag_gap_drops += stats_temp.rx_frag_gap_drops;
+
+	/*
+	 * Pnode rx stats for outer node.
+	 */
+	stats->pnode_stats.rx_packets += stats_temp.pnode_stats.rx_packets;
+	stats->pnode_stats.rx_bytes += stats_temp.pnode_stats.rx_bytes;
+	stats->pnode_stats.rx_dropped += stats_temp.pnode_stats.rx_dropped;
+
+done:
+	dev_put(dev);
+	return status;
+}
+EXPORT_SYMBOL(nss_capwapmgr_tunnel_stats);
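+
+/*
+ * nss_capwapmgr_tunnel_stats() accumulates (+=) the firmware counters into
+ * the caller's structure instead of overwriting it, so the structure must be
+ * zeroed before the first call. Illustrative sketch:
+ *
+ *	struct nss_capwap_tunnel_stats stats;
+ *
+ *	memset(&stats, 0, sizeof(stats));
+ *	if (nss_capwapmgr_tunnel_stats(dev, tunnel_id, &stats) != NSS_CAPWAPMGR_SUCCESS)
+ *		...
+ */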
+
+/*
+ * nss_capwapmgr_receive_pkt()
+ *	Receives a pkt from NSS
+ */
+static void nss_capwapmgr_receive_pkt(struct net_device *dev, struct sk_buff *skb, struct napi_struct *napi)
+{
+	struct nss_capwapmgr_priv *priv;
+	struct nss_capwap_metaheader *pre = (struct nss_capwap_metaheader *)skb->data;
+	int32_t if_num;
+
+	if (unlikely(skb->len < sizeof(struct nss_capwap_metaheader))) {
+		nss_capwapmgr_warn("%px: skb len is short :%d", dev, skb->len);
+		dev_kfree_skb_any(skb);
+		return;
+	}
+
+	/* SKB NETIF START */
+	dev_hold(dev);
+	priv = netdev_priv(dev);
+	if_num = pre->tunnel_id;	/* NSS FW sends interface number */
+	if (unlikely(if_num >= NSS_MAX_NET_INTERFACES)) {
+		nss_capwapmgr_warn("%px: if_num %d is wrong for skb\n", dev, if_num);
+		pre->tunnel_id = 0xFF;
+	} else {
+		/*
+		 * Remap interface number to tunnel_id.
+		 */
+		pre->tunnel_id = priv->if_num_to_tunnel_id[if_num];
+	}
+
+	skb->dev = dev;
+	skb->pkt_type = PACKET_HOST;
+	skb->skb_iif = dev->ifindex;
+	skb_reset_mac_header(skb);
+	skb_reset_transport_header(skb);
+	(void)netif_receive_skb(skb);
+	/* SKB NETIF END */
+	dev_put(dev);
+}
+
+/*
+ * nss_capwapmgr_acl_init()
+ *	Initializes ACL related tables and objects.
+ */
+bool nss_capwapmgr_acl_init(void)
+{
+	sw_error_t rv;
+	int i, j, uid = 0;
+
+	/*
+	 * Create and bind the ACL lists we will be using for DSCP prioritization.
+	 */
+	for (i = 0; i < NSS_CAPWAPMGR_ACL_LIST_CNT; i++) {
+		int list_id = NSS_CAPWAPMGR_ACL_LIST_START + i;
+		rv = fal_acl_list_creat(0, list_id, 0);
+		if (rv != SW_OK) {
+			nss_capwapmgr_warn("Failed to create ACL list err:%d\n", rv);
+			return false;
+		}
+
+		rv = fal_acl_list_bind(0, list_id, FAL_ACL_DIREC_IN, FAL_ACL_BIND_PORTBITMAP, NSS_CAPWAPMGR_BIND_BITMAP);
+		if (rv != SW_OK) {
+			nss_capwapmgr_warn("Failed to bind ACL list err:%d\n", rv);
+			return false;
+		}
+	}
+
+	/*
+	 * Initialize the global ACL table.
+	 */
+	for (i = 0; i < NSS_CAPWAPMGR_ACL_LIST_CNT; i++) {
+		for (j = 0; j < NSS_CAPWAPMGR_ACL_RULES_PER_LIST; j++) {
+			global.acl_list[i].rule[j].uid = uid++;
+			global.acl_list[i].rule[j].rule_id = j;
+			global.acl_list[i].rule[j].list_id = i;
+		}
+	}
+
+	return true;
+}
+
+#if defined(NSS_CAPWAPMGR_ONE_NETDEV)
+/*
+ * nss_capwapmgr_get_netdev()
+ *	Returns net device used.
+ */
+struct net_device *nss_capwapmgr_get_netdev(void)
+{
+	return nss_capwapmgr_ndev;
+}
+EXPORT_SYMBOL(nss_capwapmgr_get_netdev);
+#endif
+
+/*
+ * nss_capwapmgr_netdev_up()
+ *	NSS CAPWAP Tunnel device i/f up handler
+ */
+static int nss_capwapmgr_netdev_up(struct net_device *netdev)
+{
+	uint8_t i;
+	for (i = 0; i < NSS_CAPWAPMGR_MAX_TUNNELS; i++) {
+		(void)nss_capwapmgr_enable_tunnel(nss_capwapmgr_ndev, i);
+	}
+
+	return NOTIFY_DONE;
+}
+
+/*
+ * nss_capwapmgr_netdev_down()
+ *	NSS CAPWAP Tunnel device i/f down handler
+ */
+static int nss_capwapmgr_netdev_down(struct net_device *netdev)
+{
+	uint8_t i;
+	for (i = 0; i < NSS_CAPWAPMGR_MAX_TUNNELS; i++) {
+		(void)nss_capwapmgr_disable_tunnel(nss_capwapmgr_ndev, i);
+	}
+
+	return NOTIFY_DONE;
+}
+
+/*
+ * nss_capwapmgr_netdev_event()
+ *	Net device notifier for NSS CAPWAP manager module
+ */
+static int nss_capwapmgr_netdev_event(struct notifier_block *nb, unsigned long event, void *dev)
+{
+	struct net_device *netdev = netdev_notifier_info_to_dev(dev);
+
+	if (strstr(netdev->name, NSS_CAPWAPMGR_NETDEV_NAME) == NULL) {
+		return NOTIFY_DONE;
+	}
+
+	switch (event) {
+	case NETDEV_UP:
+		nss_capwapmgr_trace("%px: NETDEV_UP: event %lu name %s\n", netdev, event, netdev->name);
+		return nss_capwapmgr_netdev_up(netdev);
+
+	case NETDEV_DOWN:
+		nss_capwapmgr_trace("%px: NETDEV_DOWN: event %lu name %s\n", netdev, event, netdev->name);
+		return nss_capwapmgr_netdev_down(netdev);
+
+	default:
+		nss_capwapmgr_trace("%px: Unhandled notifier event %lu name %s\n", netdev, event, netdev->name);
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
+/*
+ * Linux Net device Notifier
+ */
+struct notifier_block nss_capwapmgr_netdev_notifier = {
+	.notifier_call = nss_capwapmgr_netdev_event,
+};
+
+/*
+ * nss_capwapmgr_init_module()
+ *	Tunnel CAPWAP module init function
+ */
+int __init nss_capwapmgr_init_module(void)
+{
+#ifdef CONFIG_OF
+	/*
+	 * If the node is not compatible, don't do anything.
+	 */
+	if (!of_find_node_by_name(NULL, "nss-common")) {
+		return 0;
+	}
+#endif
+	nss_capwapmgr_info("module (platform - IPQ806x, %s) loaded\n",
+			   NSS_CLIENT_BUILD_ID);
+
+	register_netdevice_notifier(&nss_capwapmgr_netdev_notifier);
+
+#if defined(NSS_CAPWAPMGR_ONE_NETDEV)
+	/*
+	 * In this code, we create a single netdev for all the CAPWAP
+	 * tunnels.
+	 */
+	nss_capwapmgr_ndev = nss_capwapmgr_netdev_create();
+	if (!nss_capwapmgr_ndev) {
+		nss_capwapmgr_warn("Couldn't create capwap interface\n");
+		return -1;
+	}
+#endif
+
+	memset(&global.tunneld, 0, sizeof(struct nss_capwap_tunnel_stats));
+
+	/*
+	 * Initialize ACL related objects and tables.
+	 */
+	if (!nss_capwapmgr_acl_init()) {
+		nss_capwapmgr_warn("Couldn't initialize ACL objects/tables\n");
+		return -1;
+	}
+
+	sema_init(&ip_response.sem, 1);
+	init_waitqueue_head(&ip_response.wq);
+
+	return 0;
+}
+
+/*
+ * nss_capwapmgr_exit_module()
+ *	Tunnel CAPWAP module exit function
+ */
+void __exit nss_capwapmgr_exit_module(void)
+{
+#if defined(NSS_CAPWAPMGR_ONE_NETDEV)
+	struct nss_capwapmgr_priv *priv;
+	uint8_t i;
+#endif
+
+#ifdef CONFIG_OF
+	/*
+	 * If the node is not compatible, don't do anything.
+	 */
+	if (!of_find_node_by_name(NULL, "nss-common")) {
+		return;
+	}
+#endif
+
+#if defined(NSS_CAPWAPMGR_ONE_NETDEV)
+	priv = netdev_priv(nss_capwapmgr_ndev);
+	for (i = 0; i < NSS_CAPWAPMGR_MAX_TUNNELS; i++) {
+		(void) nss_capwapmgr_disable_tunnel(nss_capwapmgr_ndev, i);
+		(void) nss_capwapmgr_tunnel_destroy(nss_capwapmgr_ndev, i);
+	}
+	kfree(priv->if_num_to_tunnel_id);
+	kfree(priv->resp);
+	kfree(priv->tunnel);
+	unregister_netdev(nss_capwapmgr_ndev);
+	nss_capwapmgr_ndev = NULL;
+#endif
+	unregister_netdevice_notifier(&nss_capwapmgr_netdev_notifier);
+
+	nss_capwapmgr_info("module unloaded\n");
+}
+
+module_init(nss_capwapmgr_init_module);
+module_exit(nss_capwapmgr_exit_module);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("NSS CAPWAP manager");
diff --git a/qca-nss-clients/clmapmgr/Makefile b/qca-nss-clients/clmapmgr/Makefile
new file mode 100644
index 0000000..1131778
--- /dev/null
+++ b/qca-nss-clients/clmapmgr/Makefile
@@ -0,0 +1,5 @@
+ccflags-y := -I$(obj)/../exports -I$(obj)/..  \
+	-DNSS_CLIENT_BUILD_ID="$(BUILD_ID)" -DNSS_CLMAPMGR_DEBUG_LEVEL=2 -Wall -Werror
+
+obj-m += qca-nss-clmapmgr.o
+qca-nss-clmapmgr-objs := nss_clmapmgr.o
diff --git a/qca-nss-clients/clmapmgr/nss_clmapmgr.c b/qca-nss-clients/clmapmgr/nss_clmapmgr.c
new file mode 100644
index 0000000..9574706
--- /dev/null
+++ b/qca-nss-clients/clmapmgr/nss_clmapmgr.c
@@ -0,0 +1,996 @@
+/*
+ **************************************************************************
+ * Copyright (c) 2019-2020 The Linux Foundation. All rights reserved.
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all copies.
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ **************************************************************************
+ */
+
+/*
+ * nss_clmapmgr.c
+ *  This file implements the client for the Client map (clmap) manager.
+ */
+
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+#include <linux/if_vlan.h>
+
+#include <nss_api_if.h>
+#include <nss_dynamic_interface.h>
+
+#include "nss_clmapmgr_private.h"
+#include "nss_clmapmgr.h"
+#include "nss_eogremgr.h"
+
+#define NSS_CLMAPMGR_CMD_MAX_RETRY_COUNT 3
+#define NSS_CLMAP_MAX_HEADROOM NSS_EOGREMGR_MAX_HEADROOM
+
+/*
+ * nss_clmapmgr_dev_xmit()
+ *	Netdev ops function to send packet to NSS.
+ */
+static netdev_tx_t nss_clmapmgr_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	nss_tx_status_t status;
+	int if_number;
+	struct nss_ctx_instance *clmap_ctx;
+	struct nss_clmapmgr_priv_t *priv = netdev_priv(dev);
+
+	if_number = priv->nss_if_number_us;
+	if (unlikely(if_number <= 0)) {
+		nss_clmapmgr_info("%px: clmapmgr dev is not registered with nss\n", dev);
+		goto fail;
+	}
+
+	clmap_ctx = nss_clmap_get_ctx();
+	if (unlikely(!clmap_ctx)) {
+		nss_clmapmgr_info("%px: NSS clmapmgr context not found.\n", dev);
+		goto fail;
+	}
+
+	status = nss_clmap_tx_buf(clmap_ctx, skb, (uint32_t)if_number);
+	if (unlikely(status != NSS_TX_SUCCESS)) {
+		if (likely(status == NSS_TX_FAILURE_QUEUE)) {
+			nss_clmapmgr_warning("%px: netdev :%px queue is full", dev, dev);
+			if (!netif_queue_stopped(dev)) {
+				netif_stop_queue(dev);
+			}
+			nss_clmapmgr_warning("%px: (CLMAP packet) Failed to xmit the packet because of tx queue full, status: %d\n", dev, status);
+			return NETDEV_TX_BUSY;
+		}
+		nss_clmapmgr_info("%px: NSS clmapmgr could not send packet to NSS %d\n", dev, if_number);
+		goto fail;
+	}
+
+	return NETDEV_TX_OK;
+
+fail:
+	dev->stats.tx_dropped++;
+	dev_kfree_skb_any(skb);
+	return NETDEV_TX_OK;
+}
+
+/*
+ * nss_clmapmgr_get_dev_stats64()
+ *	Netdev ops function to retrieve stats.
+ */
+static struct rtnl_link_stats64 *nss_clmapmgr_get_dev_stats64(struct net_device *dev,
+						struct rtnl_link_stats64 *stats)
+{
+	struct nss_clmapmgr_priv_t *priv;
+
+	if (!stats) {
+		nss_clmapmgr_warning("%px: invalid rtnl structure\n", dev);
+		return stats;
+	}
+
+	dev_hold(dev);
+
+	/*
+	 * The netdev core seems to increment rx_dropped because we do not pass
+	 * up an IP header, so reset it; it is of no use to us.
+	 */
+	atomic_long_set(&dev->rx_dropped, 0);
+	priv = netdev_priv(dev);
+	memset(stats, 0, sizeof(struct rtnl_link_stats64));
+	memcpy(stats, &priv->stats, sizeof(struct rtnl_link_stats64));
+	dev_put(dev);
+
+	return stats;
+}
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 6, 0))
+/*
+ * nss_clmapmgr_dev_stats64()
+ *	Netdev ops function to retrieve stats for kernel version < 4.6
+ */
+static struct rtnl_link_stats64 *nss_clmapmgr_dev_stats64(struct net_device *dev,
+						struct rtnl_link_stats64 *tot)
+{
+	return nss_clmapmgr_get_dev_stats64(dev, tot);
+}
+#else
+/*
+ * nss_clmapmgr_dev_stats64()
+ *	Netdev ops function to retrieve stats for kernel version >= 4.6
+ */
+static void nss_clmapmgr_dev_stats64(struct net_device *dev,
+				struct rtnl_link_stats64 *tot)
+{
+	nss_clmapmgr_get_dev_stats64(dev, tot);
+}
+#endif
+
+/*
+ * nss_clmapmgr_dev_init()
+ *	Netdev ops function to initialize netdevice.
+ */
+static int nss_clmapmgr_dev_init(struct net_device *dev)
+{
+	dev->mtu = ETH_DATA_LEN;
+	dev->needed_headroom = NSS_CLMAP_MAX_HEADROOM;
+	return 0;
+}
+
+/*
+ * nss_clmapmgr_dev_open()
+ *	Netdev ops function to open netdevice.
+ */
+static int nss_clmapmgr_dev_open(struct net_device *dev)
+{
+	netif_start_queue(dev);
+	return 0;
+}
+
+/*
+ * nss_clmapmgr_dev_close()
+ *	Netdevice ops function to close netdevice.
+ */
+static int nss_clmapmgr_dev_close(struct net_device *dev)
+{
+	netif_stop_queue(dev);
+	return 0;
+}
+
+/*
+ * clmap netdevice ops
+ */
+static const struct net_device_ops nss_clmapmgr_ops = {
+	.ndo_init = nss_clmapmgr_dev_init,
+	.ndo_open = nss_clmapmgr_dev_open,
+	.ndo_stop = nss_clmapmgr_dev_close,
+	.ndo_set_mac_address = eth_mac_addr,
+	.ndo_validate_addr = eth_validate_addr,
+	.ndo_start_xmit = nss_clmapmgr_dev_xmit,
+	.ndo_get_stats64 = nss_clmapmgr_dev_stats64,
+};
+
+/*
+ * nss_clmapmgr_setup()
+ */
+static void nss_clmapmgr_setup(struct net_device *dev)
+{
+	char name[IFNAMSIZ] = {0};
+
+	strlcpy(name, "nssclmap%d", IFNAMSIZ);
+	memcpy(dev->name, name, IFNAMSIZ);
+	dev->netdev_ops = &nss_clmapmgr_ops;
+	eth_hw_addr_random(dev);
+}
+
+/*
+ * nss_clmapmgr_ds_exception()
+ *	Client map manager ds exception handler to receive packet from NSS.
+ */
+static void nss_clmapmgr_ds_exception(struct net_device *dev, struct sk_buff *skb,
+				       __attribute__((unused)) struct napi_struct *napi)
+{
+	/*
+	 * Note: preheader needs to be processed by the user
+	 * before processing the ethernet packet
+	 */
+	skb->protocol = eth_type_trans(skb, dev);
+	netif_receive_skb(skb);
+}
+
+/*
+ * nss_clmapmgr_us_exception()
+ *	Client map manager us exception handler to receive packet from NSS.
+ */
+static void nss_clmapmgr_us_exception(struct net_device *dev, struct sk_buff *skb,
+				       __attribute__((unused)) struct napi_struct *napi)
+{
+	/*
+	 * This is an error packet and needs to be dropped.
+	 */
+	nss_clmapmgr_warning("%px: upstream packet got exceptioned, dropping the packet..", dev);
+	dev_kfree_skb_any(skb);
+}
+
+/*
+ * nss_clmapmgr_event_receive()
+ *	Event Callback to receive events from NSS
+ */
+static void nss_clmapmgr_event_receive(void *if_ctx, struct nss_cmn_msg *cmsg)
+{
+	struct net_device *dev = (struct net_device *)if_ctx;
+	struct nss_clmapmgr_priv_t *priv;
+	struct nss_clmap_msg *clmsg = (struct nss_clmap_msg *)cmsg;
+	struct nss_clmap_stats_msg *stats = &clmsg->msg.stats;
+	struct rtnl_link_stats64 *netdev_stats;
+	enum nss_dynamic_interface_type interface_type;
+	uint64_t dropped = 0;
+
+	dev_hold(dev);
+	priv = netdev_priv(dev);
+	netdev_stats = &priv->stats;
+
+	switch (clmsg->cm.type) {
+	case NSS_CLMAP_MSG_TYPE_SYNC_STATS:
+		interface_type = nss_dynamic_interface_get_type(nss_clmap_get_ctx(), clmsg->cm.interface);
+		if (interface_type == NSS_DYNAMIC_INTERFACE_TYPE_CLMAP_US) {
+			netdev_stats->tx_packets += stats->node_stats.tx_packets;
+			netdev_stats->tx_bytes += stats->node_stats.tx_bytes;
+			dropped += stats->dropped_macdb_lookup_failed;
+			dropped += stats->dropped_invalid_packet_size;
+			dropped += stats->dropped_low_hroom;
+		} else if (interface_type == NSS_DYNAMIC_INTERFACE_TYPE_CLMAP_DS) {
+			netdev_stats->rx_packets += stats->node_stats.rx_packets;
+			netdev_stats->rx_bytes += stats->node_stats.rx_bytes;
+			dropped += stats->dropped_pbuf_alloc_failed;
+			dropped += stats->dropped_linear_failed;
+			dropped += stats->shared_packet_count;
+			dropped += stats->ethernet_frame_error;
+		}
+		dropped += stats->dropped_next_node_queue_full;
+		netdev_stats->tx_dropped += dropped;
+		if (interface_type == NSS_DYNAMIC_INTERFACE_TYPE_CLMAP_DS) {
+			netdev_stats->rx_dropped += nss_cmn_rx_dropped_sum(&stats->node_stats);
+		}
+		break;
+
+	default:
+		nss_clmapmgr_info("%px: Unknown Event from NSS\n", dev);
+		break;
+	}
+
+	dev_put(dev);
+}
+
+/*
+ * nss_clmapmgr_us_get_if_num()
+ *	Return the upstream (US) NSS interface number.
+ */
+int nss_clmapmgr_us_get_if_num(struct net_device *dev)
+{
+	struct nss_clmapmgr_priv_t *priv;
+
+	if (!dev) {
+		nss_clmapmgr_info("Netdev is NULL");
+		return -1;
+	}
+
+	priv = (struct nss_clmapmgr_priv_t *)netdev_priv(dev);
+	return priv->nss_if_number_us;
+}
+EXPORT_SYMBOL(nss_clmapmgr_us_get_if_num);
+
+/*
+ * nss_clmapmgr_ds_get_if_num()
+ *	Return the downstream (DS) NSS interface number.
+ */
+int nss_clmapmgr_ds_get_if_num(struct net_device *dev)
+{
+	struct nss_clmapmgr_priv_t *priv;
+
+	if (!dev) {
+		nss_clmapmgr_info("Netdev is NULL");
+		return -1;
+	}
+
+	priv = (struct nss_clmapmgr_priv_t *)netdev_priv(dev);
+	return priv->nss_if_number_ds;
+}
+EXPORT_SYMBOL(nss_clmapmgr_ds_get_if_num);
+
+/*
+ * nss_clmapmgr_mac_add()
+ * 	API to send notification to NSS to add the MAC entry.
+ */
+nss_clmapmgr_status_t nss_clmapmgr_mac_add(struct net_device *dev, struct nss_clmapmgr_msg *clmapmsg)
+{
+	struct nss_clmap_msg req;
+	struct nss_clmap_mac_msg *mmsg;
+	int us_if, next_ifnum;
+	struct nss_ctx_instance *nss_ctx = NULL;
+	nss_tx_status_t status;
+
+	if (!dev) {
+		nss_clmapmgr_info("Netdev is NULL !!\n");
+		return NSS_CLMAPMGR_ERR_BAD_PARAM;
+	}
+
+	if (!clmapmsg) {
+		nss_clmapmgr_info("%px: nss_clmapmgr_msg is NULL !!\n", dev);
+		return NSS_CLMAPMGR_ERR_BAD_PARAM;
+	}
+
+	/*
+	 * Get Interface number, based on tunnel type
+	 */
+	switch (clmapmsg->tunnel_type) {
+	case NSS_CLMAPMGR_TUNNEL_EOGRE:
+		/*
+		 * For EoGRE tunnel, the next node for the packet from clmap node in NSS
+		 * would be GRE inner node. Get the GRE inner interface.
+		 */
+		next_ifnum = nss_eogremgr_get_if_num_inner(clmapmsg->tunnel_id);
+		if (next_ifnum < 0) {
+			nss_clmapmgr_info("%px: No NSS interface registered for the tunnel id: %d\n", dev, clmapmsg->tunnel_id);
+			return NSS_CLMAPMGR_ERR_TUNNEL_NOT_FOUND;
+		}
+		break;
+	default:
+		nss_clmapmgr_info("%px: Invalid tunnel type: %d\n", dev, clmapmsg->tunnel_type);
+		return NSS_CLMAPMGR_ERR_BAD_PARAM;
+	}
+
+	nss_ctx = nss_clmap_get_ctx();
+
+	/*
+	 * Check if upstream clmap interface is registered with NSS.
+	 */
+	us_if = nss_cmn_get_interface_number_by_dev_and_type(dev, NSS_DYNAMIC_INTERFACE_TYPE_CLMAP_US);
+	if (us_if < 0) {
+		nss_clmapmgr_info("%px: Net device is not registered with nss\n", dev);
+		return NSS_CLMAPMGR_ERR_NETDEV_UNKNOWN;
+	}
+
+	memset(&req, 0, sizeof(struct nss_clmap_msg));
+	mmsg = &req.msg.mac_add;
+
+	/*
+	 * Set mac_add message.
+	 */
+	memcpy(mmsg->mac_addr, clmapmsg->mac_addr, ETH_ALEN);
+	mmsg->flags = clmapmsg->flags;
+	mmsg->vlan_id = clmapmsg->vlan_id;
+	mmsg->needed_headroom = clmapmsg->needed_headroom;
+	mmsg->nexthop_ifnum = next_ifnum;
+	nss_clmap_msg_init(&req, us_if, NSS_CLMAP_MSG_TYPE_MAC_ADD, sizeof(struct nss_clmap_mac_msg), NULL, NULL);
+	status = nss_clmap_tx_msg_sync(nss_ctx, &req);
+	if (status != NSS_TX_SUCCESS) {
+		nss_clmapmgr_warning("%px: nss clmap mac add command error:%d if_num: %d\n", dev, status, us_if);
+		return NSS_CLMAPMGR_ERR_MAC_ADD_FAILED;
+	}
+
+	return NSS_CLMAPMGR_SUCCESS;
+}
+EXPORT_SYMBOL(nss_clmapmgr_mac_add);
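+
+/*
+ * Illustrative nss_clmapmgr_mac_add() usage (sketch only; caller-supplied
+ * values such as client_mac, vid and tid are hypothetical, and error
+ * handling is omitted):
+ *
+ *	struct nss_clmapmgr_msg cmsg = {0};
+ *
+ *	memcpy(cmsg.mac_addr, client_mac, ETH_ALEN);
+ *	cmsg.vlan_id = vid;
+ *	cmsg.needed_headroom = NSS_CLMAP_MAX_HEADROOM;
+ *	cmsg.tunnel_type = NSS_CLMAPMGR_TUNNEL_EOGRE;
+ *	cmsg.tunnel_id = tid;
+ *
+ *	if (nss_clmapmgr_mac_add(dev, &cmsg) != NSS_CLMAPMGR_SUCCESS)
+ *		...
+ */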
+
+/*
+ * nss_clmapmgr_mac_remove()
+ * 	API to send notification to NSS to delete the MAC entry.
+ */
+nss_clmapmgr_status_t nss_clmapmgr_mac_remove(struct net_device *dev, uint8_t *mac_addr)
+{
+	struct nss_clmap_msg req;
+	struct nss_clmap_mac_msg *mmsg;
+	int us_if;
+	struct nss_ctx_instance *nss_ctx = NULL;
+	nss_tx_status_t status;
+
+	if (!dev) {
+		nss_clmapmgr_info("Netdev is NULL !!\n");
+		return NSS_CLMAPMGR_ERR_BAD_PARAM;
+	}
+
+	if (!mac_addr) {
+		nss_clmapmgr_info("%px: mac address is NULL !!\n", dev);
+		return NSS_CLMAPMGR_ERR_BAD_PARAM;
+	}
+
+	nss_ctx = nss_clmap_get_ctx();
+
+	/*
+	 * Check if upstream clmap interface is registered with NSS
+	 */
+	us_if = nss_cmn_get_interface_number_by_dev_and_type(dev, NSS_DYNAMIC_INTERFACE_TYPE_CLMAP_US);
+	if (us_if < 0) {
+		nss_clmapmgr_info("%px: Net device is not registered with nss\n", dev);
+		return NSS_CLMAPMGR_ERR_NETDEV_UNKNOWN;
+	}
+
+	memset(&req, 0, sizeof(struct nss_clmap_msg));
+	mmsg = &req.msg.mac_add;
+
+	/*
+	 * Set mac_del message. Only the MAC address is required; the other
+	 * fields are left as 0.
+	 */
+	memcpy(mmsg->mac_addr, mac_addr, ETH_ALEN);
+	nss_clmap_msg_init(&req, us_if, NSS_CLMAP_MSG_TYPE_MAC_DEL, sizeof(struct nss_clmap_mac_msg), NULL, NULL);
+	status = nss_clmap_tx_msg_sync(nss_ctx, &req);
+	if (status != NSS_TX_SUCCESS) {
+		nss_clmapmgr_warning("%px: NSS clmap mac del command error:%d if_num: %d\n", dev, status, us_if);
+		return NSS_CLMAPMGR_ERR_MAC_DEL_FAILED;
+	}
+
+	return NSS_CLMAPMGR_SUCCESS;
+}
+EXPORT_SYMBOL(nss_clmapmgr_mac_remove);
+
+/*
+ * nss_clmapmgr_mac_flush()
+ * 	API to send notification to NSS to flush MAC entry.
+ */
+nss_clmapmgr_status_t nss_clmapmgr_mac_flush(struct net_device *dev, uint32_t tunnel_id, nss_clmapmgr_tunnel_type_t tunnel_type)
+{
+	struct nss_clmap_msg req;
+	struct nss_clmap_flush_mac_msg *mmsg = &req.msg.mac_flush;
+	int us_if, next_ifnum;
+	struct nss_ctx_instance *nss_ctx = NULL;
+	nss_tx_status_t status;
+
+	if (!dev) {
+		nss_clmapmgr_info("Netdev is NULL !!\n");
+		return NSS_CLMAPMGR_ERR_BAD_PARAM;
+	}
+
+	switch (tunnel_type) {
+	case NSS_CLMAPMGR_TUNNEL_EOGRE:
+		/*
+		 * Get GRE inner interface number
+		 */
+		next_ifnum = nss_eogremgr_get_if_num_inner(tunnel_id);
+		if (next_ifnum < 0) {
+			nss_clmapmgr_info("%px: No NSS interface registered for the tunnel id: %d\n", dev, tunnel_id);
+			return NSS_CLMAPMGR_ERR_TUNNEL_NOT_FOUND;
+		}
+		break;
+	default:
+		nss_clmapmgr_info("%px: Invalid tunnel type: %d\n", dev, tunnel_type);
+		return NSS_CLMAPMGR_ERR_BAD_PARAM;
+	}
+
+	nss_ctx = nss_clmap_get_ctx();
+
+	/*
+	 * Check if upstream clmap interface is registered with NSS
+	 */
+	us_if = nss_cmn_get_interface_number_by_dev_and_type(dev, NSS_DYNAMIC_INTERFACE_TYPE_CLMAP_US);
+	if (us_if < 0) {
+		nss_clmapmgr_info("%px: Net device is not registered with nss\n", dev);
+		return NSS_CLMAPMGR_ERR_NETDEV_UNKNOWN;
+	}
+
+	/*
+	 * Set mac_flush message
+	 */
+	memset(&req, 0, sizeof(struct nss_clmap_msg));
+	mmsg->nexthop_ifnum = next_ifnum;
+	nss_clmap_msg_init(&req, us_if, NSS_CLMAP_MSG_TYPE_MAC_FLUSH, sizeof(struct nss_clmap_flush_mac_msg), NULL, NULL);
+	status = nss_clmap_tx_msg_sync(nss_ctx, &req);
+	if (status != NSS_TX_SUCCESS) {
+		nss_clmapmgr_warning("%px: NSS clmap mac flush command error:%d if_num: %d\n", dev, status, us_if);
+		return NSS_CLMAPMGR_ERR_MAC_FLUSH_FAILED;
+	}
+
+	return NSS_CLMAPMGR_SUCCESS;
+}
+EXPORT_SYMBOL(nss_clmapmgr_mac_flush);
+
+/*
+ * nss_clmapmgr_netdev_enable()
+ *	Callback to enable the clmap device in NSS.
+ */
+int nss_clmapmgr_netdev_enable(struct net_device *dev)
+{
+	struct nss_clmap_msg req;
+	int us_if, ds_if;
+	struct nss_ctx_instance *nss_ctx = NULL;
+	struct nss_clmapmgr_priv_t *priv;
+	nss_tx_status_t status;
+
+	if (!dev) {
+		nss_clmapmgr_info("Netdev is NULL !!\n");
+		return NOTIFY_DONE;
+	}
+
+	nss_ctx = nss_clmap_get_ctx();
+
+	/*
+	 * Check if upstream clmap interface is registered with NSS
+	 */
+	us_if = nss_cmn_get_interface_number_by_dev_and_type(dev, NSS_DYNAMIC_INTERFACE_TYPE_CLMAP_US);
+	if (us_if < 0) {
+		nss_clmapmgr_info("%px: Net device is not registered with nss\n", dev);
+		goto release_ref;
+	}
+
+	/*
+	 * Check if downstream clmap interface is registered with NSS
+	 */
+	ds_if = nss_cmn_get_interface_number_by_dev_and_type(dev, NSS_DYNAMIC_INTERFACE_TYPE_CLMAP_DS);
+	if (ds_if < 0) {
+		nss_clmapmgr_info("%px: Net device is not registered with nss\n", dev);
+		goto release_ref;
+	}
+
+	/*
+	 * Send enable session command for upstream interface
+	 */
+	nss_clmap_msg_init(&req, us_if, NSS_CLMAP_MSG_TYPE_INTERFACE_ENABLE, 0, NULL, NULL);
+	status = nss_clmap_tx_msg_sync(nss_ctx, &req);
+	if (status != NSS_TX_SUCCESS) {
+		nss_clmapmgr_warning("%px: NSS clmap enable command error:%d if_num: %d\n", dev, status, us_if);
+		goto release_ref;
+	}
+
+	/*
+	 * Send enable session command for downstream interface
+	 */
+	nss_clmap_msg_init(&req, ds_if, NSS_CLMAP_MSG_TYPE_INTERFACE_ENABLE, 0, NULL, NULL);
+	status = nss_clmap_tx_msg_sync(nss_ctx, &req);
+	if (status != NSS_TX_SUCCESS) {
+		nss_clmapmgr_warning("%px: NSS clmap enable command error:%d if_num: %d\n", dev, status, ds_if);
+		goto disable_us;
+	}
+
+	/*
+	 * Open the netdev to accept packets
+	 */
+	priv = (struct nss_clmapmgr_priv_t *)netdev_priv(dev);
+	priv->clmap_enabled = true;
+	nss_clmapmgr_dev_open(dev);
+
+	return NOTIFY_OK;
+
+disable_us:
+	nss_clmap_msg_init(&req, us_if, NSS_CLMAP_MSG_TYPE_INTERFACE_DISABLE, 0, NULL, NULL);
+	status = nss_clmap_tx_msg_sync(nss_ctx, &req);
+	if (status != NSS_TX_SUCCESS) {
+		nss_clmapmgr_warning("%px: NSS clmap enable command error:%d if_num: %d\n", dev, status, us_if);
+	}
+
+release_ref:
+	return NOTIFY_DONE;
+}
+EXPORT_SYMBOL(nss_clmapmgr_netdev_enable);
+
+/*
+ * nss_clmapmgr_netdev_disable()
+ *	Callback to disable the clmap interface in NSS.
+ */
+int nss_clmapmgr_netdev_disable(struct net_device *dev)
+{
+	struct nss_clmap_msg req;
+	int us_if, ds_if;
+	struct nss_ctx_instance *nss_ctx = NULL;
+	struct nss_clmapmgr_priv_t *priv;
+	nss_tx_status_t status;
+
+	if (!dev) {
+		nss_clmapmgr_info("Netdev is NULL !!\n");
+		return NOTIFY_DONE;
+	}
+
+	nss_ctx = nss_clmap_get_ctx();
+
+	/*
+	 * Check if upstream clmap interface is registered with NSS
+	 */
+	us_if = nss_cmn_get_interface_number_by_dev_and_type(dev, NSS_DYNAMIC_INTERFACE_TYPE_CLMAP_US);
+	if (us_if < 0) {
+		nss_clmapmgr_info("%px: Net device is not registered with NSS\n", dev);
+		goto release_ref;
+	}
+
+	/*
+	 * Check if downstream clmap interface is registered with NSS
+	 */
+	ds_if = nss_cmn_get_interface_number_by_dev_and_type(dev, NSS_DYNAMIC_INTERFACE_TYPE_CLMAP_DS);
+	if (ds_if < 0) {
+		nss_clmapmgr_info("%px: Net device is not registered with NSS\n", dev);
+		goto release_ref;
+	}
+
+	/*
+	 * Send disable session command for upstream interface
+	 */
+	nss_clmap_msg_init(&req, us_if, NSS_CLMAP_MSG_TYPE_INTERFACE_DISABLE, 0, NULL, NULL);
+	status = nss_clmap_tx_msg_sync(nss_ctx, &req);
+	if (status != NSS_TX_SUCCESS) {
+		nss_clmapmgr_warning("%px: NSS clmap disable command error:%d if_num: %d\n", dev, status, us_if);
+		goto release_ref;
+	}
+
+	/*
+	 * Send disable session command for downstream interface
+	 */
+	nss_clmap_msg_init(&req, ds_if, NSS_CLMAP_MSG_TYPE_INTERFACE_DISABLE, 0, NULL, NULL);
+	status = nss_clmap_tx_msg_sync(nss_ctx, &req);
+	if (status != NSS_TX_SUCCESS) {
+		nss_clmapmgr_warning("%px: NSS clmap disable command error:%d if_num: %d\n", dev, status, ds_if);
+		goto enable_us;
+	}
+
+	/*
+	 * Close the netdev
+	 */
+	priv = (struct nss_clmapmgr_priv_t *)netdev_priv(dev);
+	priv->clmap_enabled = false;
+	nss_clmapmgr_dev_close(dev);
+
+	return NOTIFY_OK;
+
+enable_us:
+	nss_clmap_msg_init(&req, us_if, NSS_CLMAP_MSG_TYPE_INTERFACE_ENABLE, 0, NULL, NULL);
+	status = nss_clmap_tx_msg_sync(nss_ctx, &req);
+	if (status != NSS_TX_SUCCESS) {
+		nss_clmapmgr_warning("%px: NSS clmap disable command error:%d if_num: %d\n", dev, status, us_if);
+	}
+
+release_ref:
+	return NOTIFY_DONE;
+}
+EXPORT_SYMBOL(nss_clmapmgr_netdev_disable);
+
+/*
+ * nss_clmapmgr_dev_event()
+ *	Netdevice notifier call back function.
+ */
+static int nss_clmapmgr_dev_event(struct notifier_block  *nb,
+		unsigned long event, void  *dev)
+{
+	struct net_device *netdev;
+	netdev = netdev_notifier_info_to_dev(dev);
+
+	switch (event) {
+	case NETDEV_UP:
+		return nss_clmapmgr_netdev_enable(netdev);
+
+	case NETDEV_DOWN:
+		return nss_clmapmgr_netdev_disable(netdev);
+
+	default:
+		break;
+	}
+
+	return NOTIFY_DONE;
+}
+
+/*
+ * nss_clmapmgr_destroy_us_interface()
+ *	Destroy upstream clmap interface.
+ */
+static nss_clmapmgr_status_t nss_clmapmgr_destroy_us_interface(struct net_device *dev, int interface_num)
+{
+	nss_tx_status_t status;
+	int retry = 0;
+
+	if (!nss_clmap_unregister(interface_num)) {
+		nss_clmapmgr_warning("%px: clmap NSS upstream interface unregister failed\n.", dev);
+		return NSS_CLMAPMGR_ERR_NSSIF_UNREGISTER_FAILED;
+	}
+
+dealloc_us:
+	status = nss_dynamic_interface_dealloc_node(interface_num, NSS_DYNAMIC_INTERFACE_TYPE_CLMAP_US);
+	if (status != NSS_TX_SUCCESS) {
+		nss_clmapmgr_info("%px: clmap dealloc node failure for interface_num = %d\n", dev, interface_num);
+		if (++retry <= NSS_CLMAPMGR_CMD_MAX_RETRY_COUNT) {
+			goto dealloc_us;
+		}
+
+		nss_clmapmgr_error("%px: fatal Error, failed to dealloc upstream clmap NSS interface.\n", dev);
+		return NSS_CLMAPMGR_ERR_NSSIF_DEALLOC_FAILED;
+	}
+
+	return NSS_CLMAPMGR_SUCCESS;
+}
+
+/*
+ * nss_clmapmgr_destroy_ds_interface()
+ *	Destroy downstream clmap interface.
+ */
+static nss_clmapmgr_status_t nss_clmapmgr_destroy_ds_interface(struct net_device *dev, int interface_num)
+{
+	nss_tx_status_t status;
+	int retry = 0;
+
+	if (!nss_clmap_unregister(interface_num)) {
+		nss_clmapmgr_warning("%px: clmap NSS downstream interface unregister failed\n.", dev);
+		return NSS_CLMAPMGR_ERR_NSSIF_UNREGISTER_FAILED;
+	}
+
+dealloc_ds:
+	status = nss_dynamic_interface_dealloc_node(interface_num, NSS_DYNAMIC_INTERFACE_TYPE_CLMAP_DS);
+	if (status != NSS_TX_SUCCESS) {
+		nss_clmapmgr_info("%px: clmap dealloc node failure for ds_if = %d\n", dev, interface_num);
+		if (++retry <= NSS_CLMAPMGR_CMD_MAX_RETRY_COUNT) {
+			goto dealloc_ds;
+		}
+
+		nss_clmapmgr_error("%px: fatal Error, failed to dealloc downstream clmap NSS interface.\n", dev);
+		return NSS_CLMAPMGR_ERR_NSSIF_DEALLOC_FAILED;
+	}
+
+	return NSS_CLMAPMGR_SUCCESS;
+}
+
+/*
+ * nss_clmapmgr_decongestion_callback()
+ * 	Wakeup netif queue if we were stopped by start_xmit
+ */
+static void nss_clmapmgr_decongestion_callback(void *arg)
+{
+	struct net_device *dev = arg;
+	struct nss_clmapmgr_priv_t *priv;
+
+	priv = (struct nss_clmapmgr_priv_t *)netdev_priv(dev);
+	if (unlikely(!priv->clmap_enabled)) {
+		return;
+	}
+
+	if (netif_queue_stopped(dev)) {
+		netif_wake_queue(dev);
+	}
+}
+
+/*
+ * nss_clmapmgr_netdev_destroy()
+ * 	API for destroying a netdevice.
+ * 	Note: User needs to flush all MAC entries in the clmap before destroying the clmap netdevice
+ */
+nss_clmapmgr_status_t nss_clmapmgr_netdev_destroy(struct net_device *dev)
+{
+	int us_if, ds_if;
+	nss_clmapmgr_status_t ret;
+
+	netif_tx_disable(dev);
+
+	/*
+	 * Deregister decongestion callback
+	 */
+	if (nss_cmn_unregister_queue_decongestion(nss_clmap_get_ctx(), nss_clmapmgr_decongestion_callback) != NSS_CB_UNREGISTER_SUCCESS) {
+		nss_clmapmgr_info("%px: failed to unregister decongestion callback\n", dev);
+	}
+
+	/*
+	 * Check if upstream clmap interface is registered with NSS
+	 */
+	us_if = nss_cmn_get_interface_number_by_dev_and_type(dev, NSS_DYNAMIC_INTERFACE_TYPE_CLMAP_US);
+	if (us_if < 0) {
+		nss_clmapmgr_info("%px: Net device is not registered with NSS\n", dev);
+		return NSS_CLMAPMGR_ERR_NETDEV_UNKNOWN;
+	}
+
+	/*
+	 * Check if downstream clmap interface is registered with NSS
+	 */
+	ds_if = nss_cmn_get_interface_number_by_dev_and_type(dev, NSS_DYNAMIC_INTERFACE_TYPE_CLMAP_DS);
+	if (ds_if < 0) {
+		nss_clmapmgr_info("%px: Net device is not registered with NSS\n", dev);
+		return NSS_CLMAPMGR_ERR_NETDEV_UNKNOWN;
+	}
+
+	ret = nss_clmapmgr_destroy_us_interface(dev, us_if);
+	if (ret != NSS_CLMAPMGR_SUCCESS) {
+		nss_clmapmgr_warning("%px: failed to destroy clmap upstream interface: %d\n", dev, us_if);
+		return ret;
+	}
+
+	ret = nss_clmapmgr_destroy_ds_interface(dev, ds_if);
+	if (ret != NSS_CLMAPMGR_SUCCESS) {
+		nss_clmapmgr_warning("%px: failed to destroy clmap downstream interface: %d\n", dev, ds_if);
+		return ret;
+	}
+
+	nss_clmapmgr_info("%px: deleted clmap instance, us_if = %d ds_if = %d\n",
+			dev, us_if, ds_if);
+
+	unregister_netdev(dev);
+	free_netdev(dev);
+	return NSS_CLMAPMGR_SUCCESS;
+}
+EXPORT_SYMBOL(nss_clmapmgr_netdev_destroy);
+
+/*
+ * nss_clmapmgr_netdev_create()
+ *	User API to create clmap interface
+ */
+struct net_device *nss_clmapmgr_netdev_create(void)
+{
+	struct nss_ctx_instance *nss_ctx;
+	struct net_device *dev = NULL;
+	struct nss_clmapmgr_priv_t *priv;
+	nss_tx_status_t status;
+	uint32_t features = 0;
+	int32_t us_if, ds_if;
+	int ret = -1, retry = 0;
+
+	dev = alloc_etherdev(sizeof(struct nss_clmapmgr_priv_t));
+	if (!dev) {
+		nss_clmapmgr_warning("Allocation of netdev failed\n");
+		return NULL;
+	}
+
+	nss_clmapmgr_setup(dev);
+
+	/*
+	 * Register net_device
+	 */
+	ret = register_netdev(dev);
+	if (ret) {
+		nss_clmapmgr_warning("%px: Netdevice registration failed\n", dev);
+		free_netdev(dev);
+		return NULL;
+	}
+
+	/*
+	 * Create NSS clmap downstream dynamic interface
+	 */
+	ds_if = nss_dynamic_interface_alloc_node(NSS_DYNAMIC_INTERFACE_TYPE_CLMAP_DS);
+	if (ds_if < 0) {
+		nss_clmapmgr_warning("%px: NSS dynamic interface alloc failed for clmap downstream\n", dev);
+		goto deregister_netdev;
+	}
+
+	/*
+	 * Create NSS clmap upstream dynamic interface
+	 */
+	us_if = nss_dynamic_interface_alloc_node(NSS_DYNAMIC_INTERFACE_TYPE_CLMAP_US);
+	if (us_if < 0) {
+		nss_clmapmgr_warning("%px: NSS dynamic interface alloc failed for clmap upstream\n", dev);
+		goto dealloc_ds_node;
+	}
+
+	priv = (struct nss_clmapmgr_priv_t *)netdev_priv(dev);
+	priv->clmap_enabled = false;
+	priv->nss_if_number_us = us_if;
+	priv->nss_if_number_ds = ds_if;
+
+	/*
+	 * Register downstream clmap interface with NSS
+	 */
+	nss_ctx = nss_clmap_register(ds_if,
+				NSS_DYNAMIC_INTERFACE_TYPE_CLMAP_DS,
+				nss_clmapmgr_ds_exception,
+				nss_clmapmgr_event_receive,
+				dev,
+				features);
+	if (!nss_ctx) {
+		nss_clmapmgr_info("%px: nss_clmap_register failed for downstream interface\n", dev);
+		goto dealloc_us_node;
+	}
+
+	/*
+	 * Register upstream clmap interface with NSS
+	 */
+	nss_ctx = nss_clmap_register(us_if,
+				NSS_DYNAMIC_INTERFACE_TYPE_CLMAP_US,
+				nss_clmapmgr_us_exception,
+				nss_clmapmgr_event_receive,
+				dev,
+				features);
+	if (!nss_ctx) {
+		nss_clmapmgr_info("%px: nss_clmap_register failed for upstream interface\n", dev);
+		goto unregister_ds;
+	}
+
+	/*
+	 * Register decongestion callback
+	 */
+	if (nss_cmn_register_queue_decongestion(nss_clmap_get_ctx(), nss_clmapmgr_decongestion_callback, dev) != NSS_CB_REGISTER_SUCCESS) {
+		nss_clmapmgr_warning("%px: failed to register decongestion callback\n", dev);
+		goto unregister_us;
+	}
+
+	/*
+	 * Success
+	 */
+	nss_clmapmgr_info("%px: nss_clmap_register() successful. nss_ctx = %px\n", dev, nss_ctx);
+	return dev;
+
+unregister_us:
+	nss_clmap_unregister(us_if);
+
+unregister_ds:
+	nss_clmap_unregister(ds_if);
+
+dealloc_us_node:
+	status = nss_dynamic_interface_dealloc_node(us_if, NSS_DYNAMIC_INTERFACE_TYPE_CLMAP_US);
+	if (status != NSS_TX_SUCCESS) {
+		if (++retry <= NSS_CLMAPMGR_CMD_MAX_RETRY_COUNT) {
+			goto dealloc_us_node;
+		}
+		nss_clmapmgr_error("%px: fatal Error, Unable to dealloc the node[%d] in the NSS FW!\n", dev, us_if);
+	}
+
+	retry = 0;
+dealloc_ds_node:
+	status = nss_dynamic_interface_dealloc_node(ds_if, NSS_DYNAMIC_INTERFACE_TYPE_CLMAP_DS);
+	if (status != NSS_TX_SUCCESS) {
+		if (++retry <= NSS_CLMAPMGR_CMD_MAX_RETRY_COUNT) {
+			goto dealloc_ds_node;
+		}
+		nss_clmapmgr_error("%px: fatal Error, Unable to dealloc the node[%d] in the NSS FW!\n", dev, ds_if);
+	}
+
+deregister_netdev:
+	unregister_netdev(dev);
+	free_netdev(dev);
+
+	return NULL;
+}
+EXPORT_SYMBOL(nss_clmapmgr_netdev_create);
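+
+/*
+ * Usage sketch (illustrative only, not part of this driver): how a client
+ * module might drive the exported clmapmgr lifecycle APIs. The error
+ * handling shown is an assumption, not prescribed by this file.
+ *
+ *	struct net_device *clmap_dev;
+ *
+ *	clmap_dev = nss_clmapmgr_netdev_create();
+ *	if (!clmap_dev)
+ *		return -ENOMEM;
+ *
+ * Bringing the device up fires the NETDEV_UP notifier, which calls
+ * nss_clmapmgr_netdev_enable() to enable both NSS interfaces. Before
+ * teardown, flush all MAC entries in the clmap (see the note on
+ * nss_clmapmgr_netdev_destroy() above), then:
+ *
+ *	nss_clmapmgr_netdev_destroy(clmap_dev);
+ */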
+
+/*
+ * Linux Net device Notifier
+ */
+static struct notifier_block nss_clmapmgr_notifier = {
+	.notifier_call = nss_clmapmgr_dev_event,
+};
+
+/*
+ * nss_clmapmgr_dev_init_module()
+ *	Client map module init function
+ */
+static int __init nss_clmapmgr_dev_init_module(void)
+{
+#ifdef CONFIG_OF
+	/*
+	 * If the node is not compatible, don't do anything.
+	 */
+	if (!of_find_node_by_name(NULL, "nss-common")) {
+		return 0;
+	}
+#endif
+	register_netdevice_notifier(&nss_clmapmgr_notifier);
+
+	return 0;
+}
+
+/*
+ * nss_clmapmgr_exit_module()
+ *	Client map module exit function
+ */
+static void __exit nss_clmapmgr_exit_module(void)
+{
+#ifdef CONFIG_OF
+	/*
+	 * If the node is not compatible, don't do anything.
+	 */
+	if (!of_find_node_by_name(NULL, "nss-common")) {
+		return;
+	}
+#endif
+	/*
+	 * Unregister the netdevice notifier
+	 */
+	unregister_netdevice_notifier(&nss_clmapmgr_notifier);
+}
+
+module_init(nss_clmapmgr_dev_init_module);
+module_exit(nss_clmapmgr_exit_module);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("NSS client map manager");
diff --git a/qca-nss-clients/clmapmgr/nss_clmapmgr_private.h b/qca-nss-clients/clmapmgr/nss_clmapmgr_private.h
new file mode 100644
index 0000000..28b44d6
--- /dev/null
+++ b/qca-nss-clients/clmapmgr/nss_clmapmgr_private.h
@@ -0,0 +1,92 @@
+/*
+ **************************************************************************
+ * Copyright (c) 2019 The Linux Foundation. All rights reserved.
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all copies.
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ **************************************************************************
+ */
+
+/*
+ * nss_clmapmgr_private.h
+ *
+ * Private header file for NSS clmapmgr
+ */
+
+#ifndef _NSS_CLMAPMGR_PRIVATE_H_
+#define _NSS_CLMAPMGR_PRIVATE_H_
+
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if_ether.h>
+
+#include <nss_api_if.h>
+#include <nss_dynamic_interface.h>
+
+#include "nss_clmapmgr.h"
+
+/*
+ * clmap debug macros
+ */
+#if (NSS_CLMAPMGR_DEBUG_LEVEL < 1)
+#define nss_clmapmgr_assert(c)
+#else
+#define nss_clmapmgr_assert(c) BUG_ON(!(c))
+#endif
+
+#if defined(CONFIG_DYNAMIC_DEBUG)
+
+/*
+ * Compile messages for dynamic enable/disable
+ */
+#define nss_clmapmgr_warning(s, ...) pr_debug("%s[%d]:" s, __func__, __LINE__, ##__VA_ARGS__)
+#define nss_clmapmgr_info(s, ...) pr_debug("%s[%d]:" s, __func__, __LINE__, ##__VA_ARGS__)
+#define nss_clmapmgr_trace(s, ...) pr_debug("%s[%d]:" s, __func__, __LINE__, ##__VA_ARGS__)
+
+#else
+
+/*
+ * Statically compile messages at different levels
+ */
+#if (NSS_CLMAPMGR_DEBUG_LEVEL < 2)
+#define nss_clmapmgr_warning(s, ...)
+#else
+#define nss_clmapmgr_warning(s, ...) pr_warn("%s[%d]:" s, __func__, __LINE__, ##__VA_ARGS__)
+#endif
+
+#if (NSS_CLMAPMGR_DEBUG_LEVEL < 3)
+#define nss_clmapmgr_info(s, ...)
+#else
+#define nss_clmapmgr_info(s, ...)   pr_notice("%s[%d]:" s, __func__, __LINE__, ##__VA_ARGS__)
+#endif
+
+#if (NSS_CLMAPMGR_DEBUG_LEVEL < 4)
+#define nss_clmapmgr_trace(s, ...)
+#else
+#define nss_clmapmgr_trace(s, ...)  pr_info("%s[%d]:" s, __func__, __LINE__, ##__VA_ARGS__)
+#endif
+#endif
+#define nss_clmapmgr_error(s, ...) pr_err("%s[%d]:" s, __func__, __LINE__, ##__VA_ARGS__)
+
+/*
+ * nss_clmapmgr_priv_t
+ * 	Private structure for NSS clmapmgr.
+ */
+struct nss_clmapmgr_priv_t {
+	bool clmap_enabled;			/* Clmap status */
+	int nss_if_number_us;			/* Clmapmgr upstream NSS interface number */
+	int nss_if_number_ds;			/* Clmapmgr downstream NSS interface number */
+	struct rtnl_link_stats64 stats;		/* Netdev stats */
+};
+
+#endif
diff --git a/qca-nss-clients/dtls/v1.0/Makefile b/qca-nss-clients/dtls/v1.0/Makefile
new file mode 100644
index 0000000..410482a
--- /dev/null
+++ b/qca-nss-clients/dtls/v1.0/Makefile
@@ -0,0 +1,7 @@
+# Makefile for DTLS manager
+
+ccflags-y += $(NSS_CCFLAGS) -I$(obj)/../../exports
+ccflags-y += -DNSS_DTLSMGR_DEBUG_LEVEL=0
+
+obj-m += qca-nss-dtlsmgr.o
+qca-nss-dtlsmgr-objs := nss_connmgr_dtls.o nss_connmgr_dtls_netdev.o
diff --git a/qca-nss-clients/dtls/v1.0/nss_connmgr_dtls.c b/qca-nss-clients/dtls/v1.0/nss_connmgr_dtls.c
new file mode 100644
index 0000000..ec1356e
--- /dev/null
+++ b/qca-nss-clients/dtls/v1.0/nss_connmgr_dtls.c
@@ -0,0 +1,1047 @@
+/*
+ **************************************************************************
+ * Copyright (c) 2016-2017, 2020, The Linux Foundation. All rights reserved.
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all copies.
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ **************************************************************************
+ */
+
+/*
+ * nss_connmgr_dtls.c
+ *	NSS DTLS Manager
+ */
+
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <net/ipv6.h>
+#include <linux/if_arp.h>
+#include <linux/etherdevice.h>
+#include <linux/atomic.h>
+
+#include <nss_api_if.h>
+#include <nss_dynamic_interface.h>
+
+#include "nss_connmgr_dtls.h"
+
+/*
+ * Global DTLS context
+ */
+static struct nss_dtlsmgr_ctx g_ctx;
+
+static struct nss_dtlsmgr_crypto_algo dtlsmgr_algo[NSS_DTLSMGR_ALGO_MAX] = {
+	[NSS_DTLSMGR_ALGO_AES_CBC_SHA1_HMAC] = {
+		.cipher_algo = NSS_CRYPTO_CIPHER_AES_CBC,
+		.auth_algo = NSS_CRYPTO_AUTH_SHA1_HMAC,
+		.hash_len = NSS_CRYPTO_MAX_HASHLEN_SHA1,
+		.iv_len = NSS_CRYPTO_MAX_IVLEN_AES,
+	},
+	[NSS_DTLSMGR_ALGO_AES_CBC_SHA256_HMAC] = {
+		.cipher_algo = NSS_CRYPTO_CIPHER_AES_CBC,
+		.auth_algo = NSS_CRYPTO_AUTH_SHA256_HMAC,
+		.hash_len = NSS_CRYPTO_MAX_HASHLEN_SHA256,
+		.iv_len = NSS_CRYPTO_MAX_IVLEN_AES,
+	},
+	[NSS_DTLSMGR_ALGO_3DES_CBC_SHA1_HMAC] = {
+		.cipher_algo = NSS_CRYPTO_CIPHER_DES,
+		.auth_algo = NSS_CRYPTO_AUTH_SHA1_HMAC,
+		.hash_len = NSS_CRYPTO_MAX_HASHLEN_SHA1,
+		.iv_len = NSS_CRYPTO_MAX_IVLEN_DES,
+	},
+	[NSS_DTLSMGR_ALGO_3DES_CBC_SHA256_HMAC] = {
+		.cipher_algo = NSS_CRYPTO_CIPHER_DES,
+		.auth_algo = NSS_CRYPTO_AUTH_SHA256_HMAC,
+		.hash_len = NSS_CRYPTO_MAX_HASHLEN_SHA256,
+		.iv_len = NSS_CRYPTO_MAX_IVLEN_DES,
+	}
+};
+
+/*
+ * nss_dtlsmgr_session_insert()
+ *	Insert a DTLS session into global list of sessions.
+ *	Must be called with global context lock held.
+ */
+static bool nss_dtlsmgr_session_insert(struct nss_dtlsmgr_session *s)
+{
+	int32_t i;
+
+	assert_spin_locked(&g_ctx.lock);
+
+	for (i = 0; i < NSS_MAX_DTLS_SESSIONS; i++) {
+		if (g_ctx.session[i] == NULL) {
+			g_ctx.session[i] = s;
+			return true;
+		}
+	}
+
+	return false;
+}
+
+/*
+ * nss_dtlsmgr_session_remove()
+ *	Remove a DTLS session from list of sessions.
+ *	Must be called with global context lock held.
+ */
+static struct nss_dtlsmgr_session *nss_dtlsmgr_session_remove(uint32_t sif)
+{
+	int32_t i;
+	struct nss_dtlsmgr_session *s;
+
+	assert_spin_locked(&g_ctx.lock);
+
+	for (i = 0; i < NSS_MAX_DTLS_SESSIONS; i++) {
+		s = g_ctx.session[i];
+		if (!s)
+			continue;
+
+		nss_dtlsmgr_assert(s->magic == NSS_DTLSMGR_SESSION_MAGIC);
+		if (s->nss_dtls_if == sif) {
+			g_ctx.session[i] = NULL;
+			return s;
+		}
+	}
+
+	return NULL;
+}
+
+/*
+ * nss_dtlsmgr_session_find()
+ *	Find a DTLS session from list of sessions.
+ *	Must be called with global context lock held.
+ */
+static struct nss_dtlsmgr_session *nss_dtlsmgr_session_find(uint32_t sif)
+{
+	int32_t i;
+	struct nss_dtlsmgr_session *s;
+
+	assert_spin_locked(&g_ctx.lock);
+
+	for (i = 0; i < NSS_MAX_DTLS_SESSIONS; i++) {
+		s = g_ctx.session[i];
+		if (!s)
+			continue;
+
+		nss_dtlsmgr_assert(s->magic == NSS_DTLSMGR_SESSION_MAGIC);
+		if (s->nss_dtls_if == sif)
+			return s;
+	}
+
+	return NULL;
+}
+
+/*
+ * nss_dtlsmgr_session_cleanup()
+ *	Cleanup DTLS session
+ */
+static void nss_dtlsmgr_session_cleanup(struct nss_dtlsmgr_session *ds)
+{
+	nss_crypto_status_t crypto_status;
+
+	nss_dtlsmgr_info("%px: DTLS session I/F %u cleanup\n",
+			 &g_ctx, ds->nss_dtls_if);
+
+	nss_dtls_unregister_if(ds->nss_dtls_if);
+
+	nss_dtlsmgr_netdev_destroy(ds);
+
+	nss_dynamic_interface_dealloc_node(ds->nss_dtls_if,
+					   NSS_DYNAMIC_INTERFACE_TYPE_DTLS);
+
+	crypto_status = nss_crypto_session_free(g_ctx.crypto_hdl,
+						ds->crypto_idx_encap);
+	if (crypto_status != NSS_CRYPTO_STATUS_OK) {
+		nss_dtlsmgr_info("%px: dtls I/F:%u, unable to free crypto session id:%d\n",
+				 &g_ctx, ds->nss_dtls_if, ds->crypto_idx_encap);
+	}
+
+	crypto_status = nss_crypto_session_free(g_ctx.crypto_hdl,
+						ds->crypto_idx_decap);
+	if (crypto_status != NSS_CRYPTO_STATUS_OK) {
+		nss_dtlsmgr_info("%px: dtls I/F:%u, unable to free crypto session id:%d\n",
+				 &g_ctx, ds->nss_dtls_if, ds->crypto_idx_decap);
+	}
+
+	if (ds->cidx_decap_pending != NSS_CRYPTO_MAX_IDXS) {
+		crypto_status = nss_crypto_session_free(g_ctx.crypto_hdl,
+							ds->cidx_decap_pending);
+		if (crypto_status != NSS_CRYPTO_STATUS_OK) {
+			nss_dtlsmgr_info("%px: dtls I/F:%u, unable to free crypto session id:%d\n", &g_ctx, ds->nss_dtls_if, ds->cidx_decap_pending);
+		}
+	}
+
+	if (ds->cidx_encap_pending != NSS_CRYPTO_MAX_IDXS) {
+		crypto_status = nss_crypto_session_free(g_ctx.crypto_hdl,
+							ds->cidx_encap_pending);
+		if (crypto_status != NSS_CRYPTO_STATUS_OK) {
+			nss_dtlsmgr_info("%px: dtls I/F:%u, unable to free crypto session id:%d\n", &g_ctx, ds->nss_dtls_if, ds->cidx_encap_pending);
+		}
+	}
+
+	ds->magic = 0;
+	kfree(ds);
+}
+
+/*
+ * nss_dtlsmgr_session_find_and_ref()
+ *	Find a DTLS session from list of sessions and
+ *	increments reference count. Must be called with
+ *	global context lock held.
+ */
+static struct nss_dtlsmgr_session *nss_dtlsmgr_session_find_and_ref(uint32_t sif)
+{
+	struct nss_dtlsmgr_session *s;
+
+	s = nss_dtlsmgr_session_find(sif);
+	if (!s)
+		return NULL;
+
+	/*
+	 * atomic_inc_and_test() returns true only if the increment
+	 * result is zero, which would indicate a refcounting bug.
+	 */
+	if (atomic_inc_and_test(&s->ref)) {
+		nss_dtlsmgr_assert(false);
+	}
+
+	return s;
+}
+
+/*
+ * nss_dtlsmgr_session_ref_dec()
+ *	Decrement reference count of a DTLS session.
+ *	Perform session cleanup if reference count falls to zero.
+ */
+static void nss_dtlsmgr_session_ref_dec(struct nss_dtlsmgr_session *s)
+{
+	if (atomic_dec_and_test(&s->ref)) {
+		nss_dtlsmgr_session_cleanup(s);
+	}
+}
+
+/*
+ * nss_connmgr_dtls_data_receive()
+ *	Handler to receive packets from NSS.
+ */
+static void nss_connmgr_dtls_data_receive(struct net_device *dev,
+					  struct sk_buff *skb,
+					  struct napi_struct *napi)
+{
+	struct nss_dtlsmgr_netdev_priv *priv;
+	struct nss_dtlsmgr_session *s;
+	uint32_t meta;
+
+	BUG_ON(dev == NULL);
+	BUG_ON(skb == NULL);
+
+	dev_hold(dev);
+	priv = netdev_priv(dev);
+	s = priv->s;
+
+	/*
+	 * Get DTLS metadata
+	 */
+	meta = *(__be32 *)skb->data;
+	meta = ntohl(meta);
+	if (NSS_DTLSMGR_METADATA_CTYPE(meta) != NSS_DTLSMGR_CTYPE_APP) {
+		nss_dtlsmgr_info("%px: Dropping non app dtls pkt\n", skb);
+		dev_kfree_skb_any(skb);
+		dev_put(dev);
+		return;
+	}
+
+	if (NSS_DTLSMGR_METADATA_ERROR(meta) != NSS_DTLSMGR_METADATA_ERROR_OK) {
+		nss_dtlsmgr_info("%px: Dropping error pkt\n", skb);
+		dev_kfree_skb_any(skb);
+		dev_put(dev);
+		return;
+	}
+
+	/*
+	 * Remove four bytes at start of
+	 * buffer containing the DTLS metadata.
+	 */
+	skb_pull(skb, NSS_DTLSMGR_METADATA_LEN);
+
+	skb_reset_network_header(skb);
+	skb->pkt_type = PACKET_HOST;
+	skb->skb_iif = dev->ifindex;
+	skb->dev = dev;
+	if (s->flags & NSS_DTLSMGR_HDR_IPV6)
+		skb->protocol = htons(ETH_P_IPV6);
+	else
+		skb->protocol = htons(ETH_P_IP);
+
+	netif_receive_skb(skb);
+	dev_put(dev);
+}
+
+/*
+ * nss_connmgr_dtls_event_receive()
+ *	Event Callback to receive events from NSS
+ */
+static void nss_connmgr_dtls_event_receive(void *if_ctx,
+					   struct nss_dtls_msg *tnlmsg)
+{
+	struct nss_dtlsmgr_session *ds;
+	struct nss_dtlsmgr_session_stats_update stats;
+	struct nss_dtls_session_stats *msg_stats;
+
+	spin_lock(&g_ctx.lock);
+	ds = nss_dtlsmgr_session_find_and_ref(tnlmsg->cm.interface);
+	spin_unlock(&g_ctx.lock);
+
+	if (!ds) {
+		return;
+	}
+
+	switch (tnlmsg->cm.type) {
+	case NSS_DTLS_MSG_SESSION_STATS:
+		if (ds->stats_update_cb == NULL)
+			break;
+
+		memset(&stats, 0, sizeof(struct nss_dtlsmgr_session_stats_update));
+		msg_stats = &tnlmsg->msg.stats;
+
+		stats.tx_pkts = msg_stats->node_stats.tx_packets;
+		stats.rx_pkts = msg_stats->node_stats.rx_packets;
+		stats.rx_dropped = nss_cmn_rx_dropped_sum(&msg_stats->node_stats);
+		stats.tx_auth_done = msg_stats->tx_auth_done;
+		stats.rx_auth_done = msg_stats->rx_auth_done;
+		stats.tx_cipher_done = msg_stats->tx_cipher_done;
+		stats.rx_cipher_done = msg_stats->rx_cipher_done;
+		stats.tx_cbuf_alloc_fail = msg_stats->tx_cbuf_alloc_fail;
+		stats.rx_cbuf_alloc_fail = msg_stats->rx_cbuf_alloc_fail;
+		stats.tx_cenqueue_fail = msg_stats->tx_cenqueue_fail;
+		stats.rx_cenqueue_fail = msg_stats->rx_cenqueue_fail;
+		stats.tx_dropped_hroom = msg_stats->tx_dropped_hroom;
+		stats.tx_dropped_troom = msg_stats->tx_dropped_troom;
+		stats.tx_forward_enqueue_fail = msg_stats->tx_forward_enqueue_fail;
+		stats.rx_forward_enqueue_fail = msg_stats->rx_forward_enqueue_fail;
+		stats.rx_invalid_version = msg_stats->rx_invalid_version;
+		stats.rx_invalid_epoch = msg_stats->rx_invalid_epoch;
+		stats.rx_malformed = msg_stats->rx_malformed;
+		stats.rx_cipher_fail = msg_stats->rx_cipher_fail;
+		stats.rx_auth_fail = msg_stats->rx_auth_fail;
+		stats.rx_capwap_classify_fail = msg_stats->rx_capwap_classify_fail;
+		stats.rx_replay_fail = msg_stats->rx_replay_fail;
+		stats.rx_replay_duplicate = msg_stats->rx_replay_duplicate;
+		stats.rx_replay_out_of_window = msg_stats->rx_replay_out_of_window;
+		stats.outflow_queue_full = msg_stats->outflow_queue_full;
+		stats.decap_queue_full = msg_stats->decap_queue_full;
+		stats.pbuf_alloc_fail = msg_stats->pbuf_alloc_fail;
+		stats.pbuf_copy_fail = msg_stats->pbuf_copy_fail;
+		stats.epoch = msg_stats->epoch;
+		stats.tx_seq_high = msg_stats->tx_seq_high;
+		stats.tx_seq_low = msg_stats->tx_seq_low;
+
+		ds->stats_update_cb(ds->nss_dtls_if, &stats);
+		break;
+
+	default:
+		nss_dtlsmgr_info("%px: Unknown Event from NSS\n", &g_ctx);
+		break;
+	}
+
+	nss_dtlsmgr_session_ref_dec(ds);
+}
+
+/*
+ * nss_dtlsmgr_alloc_crypto()
+ *	Allocate a crypto session and update encrypt/decrypt session parameters.
+ */
+nss_dtlsmgr_status_t nss_dtlsmgr_alloc_crypto(struct nss_dtlsmgr_crypto *crypto, uint32_t *crypto_idx, bool encrypt)
+{
+	struct nss_dtlsmgr_crypto_algo *algo;
+	struct nss_crypto_params params;
+	struct nss_crypto_key cipher;
+	struct nss_crypto_key auth;
+
+	memset(&cipher, 0, sizeof(struct nss_crypto_key));
+	memset(&auth, 0, sizeof(struct nss_crypto_key));
+
+	if (crypto->algo >= NSS_DTLSMGR_ALGO_MAX) {
+		nss_dtlsmgr_info("%px: invalid algorithm type %d", &g_ctx, crypto->algo);
+		return NSS_DTLSMGR_FAIL_NOCRYPTO;
+	}
+
+	algo = &dtlsmgr_algo[crypto->algo];
+
+	cipher.algo = algo->cipher_algo;
+	cipher.key = crypto->cipher_key.data;
+	cipher.key_len = crypto->cipher_key.len;
+
+	auth.algo = algo->auth_algo;
+	auth.key = crypto->auth_key.data;
+	auth.key_len = crypto->auth_key.len;
+
+	if (nss_crypto_session_alloc(g_ctx.crypto_hdl, &cipher, &auth, crypto_idx) != NSS_CRYPTO_STATUS_OK) {
+		nss_dtlsmgr_info("%px: DTLS crypto alloc failed\n", &g_ctx);
+		return NSS_DTLSMGR_FAIL_NOCRYPTO;
+	}
+
+	/*
+	 * Update crypto session
+	 */
+	memset(&params, 0, sizeof(struct nss_crypto_params));
+	params.cipher_skip = NSS_DTLSMGR_HDR_LEN + algo->iv_len;
+	params.auth_skip = 0;
+	params.req_type = (encrypt ? NSS_CRYPTO_REQ_TYPE_ENCRYPT : NSS_CRYPTO_REQ_TYPE_DECRYPT);
+	params.req_type |= NSS_CRYPTO_REQ_TYPE_AUTH;
+
+	if (nss_crypto_session_update(g_ctx.crypto_hdl, *crypto_idx, &params) != NSS_CRYPTO_STATUS_OK) {
+		nss_dtlsmgr_info("%px: failed to update crypto session %d", &g_ctx, *crypto_idx);
+		nss_crypto_session_free(g_ctx.crypto_hdl, *crypto_idx);
+		return NSS_DTLSMGR_FAIL_NOCRYPTO;
+	}
+
+	nss_dtlsmgr_info("%px: auth_skip:%d cipher_skip:%d\n", &g_ctx, params.auth_skip, params.cipher_skip);
+	return NSS_DTLSMGR_OK;
+}
+
+/*
+ * nss_dtlsmgr_session_create()
+ *	Create DTLS session and associated crypto sessions.
+ */
+struct net_device *nss_dtlsmgr_session_create(struct nss_dtlsmgr_config *cfg)
+{
+	struct nss_dtlsmgr_session *ds;
+	struct nss_dtls_msg dtlsmsg;
+	int32_t i = 0;
+	struct nss_dtlsmgr_crypto_algo *encap_algo, *decap_algo;
+	struct nss_dtls_session_configure *scfg;
+	nss_tx_status_t status;
+	nss_crypto_status_t crypto_status;
+	enum nss_dtlsmgr_status ret = NSS_DTLSMGR_FAIL;
+	uint32_t features = 0;
+	uint32_t mtu_adjust;
+
+	if ((cfg->encap.ver != NSS_DTLSMGR_VERSION_1_0) && (cfg->encap.ver != NSS_DTLSMGR_VERSION_1_2)) {
+		nss_dtlsmgr_warn("%px: Invalid DTLS version\n", &g_ctx);
+		return NULL;
+	}
+
+	/*
+	 * Allocate memory for new dtls session
+	 */
+	ds = kzalloc(sizeof(struct nss_dtlsmgr_session), GFP_KERNEL);
+	if (!ds) {
+		nss_dtlsmgr_info("%px: DTLS client allocation failed\n", &g_ctx);
+		return NULL;
+	}
+
+	/*
+	 * Create crypto session for encap
+	 */
+	ret = nss_dtlsmgr_alloc_crypto(&cfg->encap.crypto, &ds->crypto_idx_encap, true);
+	if (ret != NSS_DTLSMGR_OK) {
+		nss_dtlsmgr_info("failed to create encap session %d", ret);
+		goto dtls_crypto_encap_alloc_fail;
+	}
+
+	/*
+	 * Create crypto session for decap
+	 */
+	ret = nss_dtlsmgr_alloc_crypto(&cfg->decap.crypto, &ds->crypto_idx_decap, false);
+	if (ret != NSS_DTLSMGR_OK) {
+		nss_dtlsmgr_info("failed to create decap session %d", ret);
+		goto dtls_crypto_decap_alloc_fail;
+	}
+
+	/*
+	 * Allocate NSS dynamic interface
+	 */
+	ds->nss_dtls_if = nss_dynamic_interface_alloc_node(NSS_DYNAMIC_INTERFACE_TYPE_DTLS);
+	if (ds->nss_dtls_if == -1) {
+		nss_dtlsmgr_info("%px: DTLS dynamic I/F alloc failed\n", &g_ctx);
+		goto dtls_dynamic_if_alloc_fail;
+	}
+
+	/*
+	 * Create netdevice
+	 */
+	if (nss_dtlsmgr_netdev_create(ds) != NSS_DTLSMGR_OK) {
+		nss_dtlsmgr_info("%px: DTLS netdev creation failed\n", &g_ctx);
+		goto dtls_netdev_create_fail;
+	}
+
+	/*
+	 * Register NSS DTLS I/F
+	 */
+	ds->nss_ctx = nss_dtls_register_if(ds->nss_dtls_if,
+					   nss_connmgr_dtls_data_receive,
+					   nss_connmgr_dtls_event_receive,
+					   ds->netdev, features,
+					   (void *)ds);
+	if (ds->nss_ctx == NULL) {
+		nss_dtlsmgr_info("%px: DTLS dynamic I/F register failed\n", &g_ctx);
+		goto dtls_dynamic_if_register_fail;
+	}
+
+	/*
+	 * Initialize DTLS manager session
+	 */
+	ds->magic = NSS_DTLSMGR_SESSION_MAGIC;
+	ds->flags = cfg->flags;
+	ds->ver = cfg->encap.ver;
+	ds->sport = cfg->encap.sport;
+	ds->dport = cfg->encap.dport;
+	ds->epoch = cfg->encap.epoch;
+	ds->ip_ttl = cfg->encap.ip_ttl;
+	ds->nss_app_if = cfg->decap.nexthop_ifnum;
+	ds->window_size = cfg->decap.window_size;
+	ds->stats_update_cb = NULL; /* TODO */
+	ds->cidx_encap_pending = NSS_CRYPTO_MAX_IDXS;
+	ds->cidx_decap_pending = NSS_CRYPTO_MAX_IDXS;
+	atomic_set(&ds->ref, 1);
+
+	if (ds->flags & NSS_DTLSMGR_HDR_IPV6) {
+		for (i = 0; i < 4 ; i++) {
+			ds->sip.ipv6[i] = cfg->encap.sip[i];
+			ds->dip.ipv6[i] = cfg->encap.dip[i];
+		}
+	} else {
+		ds->sip.ipv4 = cfg->encap.sip[0];
+		ds->dip.ipv4 = cfg->encap.dip[0];
+	}
+
+	/*
+	 * Insert session into DTLS manager list
+	 */
+	spin_lock_bh(&g_ctx.lock);
+	if (!nss_dtlsmgr_session_insert(ds)) {
+		spin_unlock_bh(&g_ctx.lock);
+		goto dtls_session_insert_fail;
+	}
+	spin_unlock_bh(&g_ctx.lock);
+
+	/*
+	 * Send DTLS configure message to NSS
+	 */
+	memset(&dtlsmsg, 0, sizeof(struct nss_dtls_msg));
+	nss_dtls_msg_init(&dtlsmsg, ds->nss_dtls_if,
+			  NSS_DTLS_MSG_SESSION_CONFIGURE,
+			  sizeof(struct nss_dtls_session_configure),
+			  NULL, NULL);
+
+	encap_algo = &dtlsmgr_algo[cfg->encap.crypto.algo];
+	decap_algo = &dtlsmgr_algo[cfg->decap.crypto.algo];
+
+	scfg = &dtlsmsg.msg.cfg;
+	scfg->ver = ds->ver;
+	scfg->flags = ds->flags;
+	scfg->crypto_idx_encap = ds->crypto_idx_encap;
+	scfg->crypto_idx_decap = ds->crypto_idx_decap;
+	scfg->iv_len_encap = encap_algo->iv_len;
+	scfg->iv_len_decap = decap_algo->iv_len;
+	scfg->hash_len_encap = encap_algo->hash_len;
+	scfg->hash_len_decap = decap_algo->hash_len;
+	scfg->cipher_algo_encap = encap_algo->cipher_algo;
+	scfg->cipher_algo_decap = decap_algo->cipher_algo;
+	scfg->auth_algo_encap = encap_algo->auth_algo;
+	scfg->auth_algo_decap = decap_algo->auth_algo;
+	scfg->nss_app_if = ds->nss_app_if;
+	scfg->sport = ds->sport;
+	scfg->dport = ds->dport;
+	scfg->epoch = ds->epoch;
+	scfg->window_size = ds->window_size;
+	scfg->oip_ttl = ds->ip_ttl;
+
+	if (ds->flags & NSS_DTLSMGR_HDR_IPV6) {
+		for (i = 0; i < 4; i++) {
+			scfg->sip[i] = ds->sip.ipv6[i];
+			scfg->dip[i] = ds->dip.ipv6[i];
+		}
+	} else {
+		scfg->sip[0] = ds->sip.ipv4;
+		scfg->dip[0] = ds->dip.ipv4;
+	}
+
+	status = nss_dtls_tx_msg_sync(ds->nss_ctx, &dtlsmsg);
+	if (status != NSS_TX_SUCCESS) {
+		nss_dtlsmgr_info("%px: DTLS cfg msg tx failed\n", &g_ctx);
+		goto dtls_msg_tx_fail;
+	}
+
+	/*
+	 * Adjust MTU of netdev
+	 */
+	mtu_adjust = NSS_DTLSMGR_HDR_LEN;
+	mtu_adjust += sizeof(struct udphdr);
+
+	if (ds->flags & NSS_DTLSMGR_HDR_IPV6)
+		mtu_adjust += sizeof(struct ipv6hdr);
+	else
+		mtu_adjust += sizeof(struct iphdr);
+
+	if (ds->flags & NSS_DTLSMGR_HDR_CAPWAP)
+		mtu_adjust += NSS_DTLSMGR_CAPWAPHDR_LEN;
+
+	mtu_adjust += ((scfg->iv_len_encap * 2) + scfg->hash_len_encap);
+
+	ds->netdev->mtu -= mtu_adjust;
+
+	nss_dtlsmgr_info("%px: NSS DTLS session I/F:%d(%s) created\n",
+			 &g_ctx, ds->nss_dtls_if, ds->netdev->name);
+
+	return ds->netdev;
+
+dtls_msg_tx_fail:
+	spin_lock_bh(&g_ctx.lock);
+	nss_dtlsmgr_session_remove(ds->nss_dtls_if);
+	spin_unlock_bh(&g_ctx.lock);
+
+dtls_session_insert_fail:
+	nss_dtls_unregister_if(ds->nss_dtls_if);
+
+dtls_dynamic_if_register_fail:
+	nss_dtlsmgr_netdev_destroy(ds);
+
+dtls_netdev_create_fail:
+	nss_dynamic_interface_dealloc_node(ds->nss_dtls_if,
+					   NSS_DYNAMIC_INTERFACE_TYPE_DTLS);
+
+dtls_dynamic_if_alloc_fail:
+	crypto_status = nss_crypto_session_free(g_ctx.crypto_hdl,
+						ds->crypto_idx_decap);
+	if (crypto_status != NSS_CRYPTO_STATUS_OK) {
+		nss_dtlsmgr_info("%px: dtls I/F:%u, unable to free crypto session id:%d\n", &g_ctx, ds->nss_dtls_if, ds->crypto_idx_decap);
+	}
+
+dtls_crypto_decap_alloc_fail:
+	crypto_status = nss_crypto_session_free(g_ctx.crypto_hdl,
+						ds->crypto_idx_encap);
+	if (crypto_status != NSS_CRYPTO_STATUS_OK) {
+		nss_dtlsmgr_info("%px: dtls I/F:%u, unable to free crypto session id:%d\n", &g_ctx, ds->nss_dtls_if, ds->crypto_idx_encap);
+	}
+
+dtls_crypto_encap_alloc_fail:
+	ds->magic = 0;
+	kfree(ds);
+	return NULL;
+}
+EXPORT_SYMBOL(nss_dtlsmgr_session_create);
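+
+/*
+ * Usage sketch (illustrative only): a hypothetical caller of
+ * nss_dtlsmgr_session_create(). Field values are placeholders; key
+ * material, IP addresses and decap.nexthop_ifnum are assumed to be set
+ * up elsewhere and are not taken from this file.
+ *
+ *	struct nss_dtlsmgr_config cfg = {0};
+ *	struct net_device *dtls_dev;
+ *
+ *	cfg.encap.ver = NSS_DTLSMGR_VERSION_1_2;
+ *	cfg.encap.crypto.algo = NSS_DTLSMGR_ALGO_AES_CBC_SHA1_HMAC;
+ *	cfg.decap.crypto.algo = NSS_DTLSMGR_ALGO_AES_CBC_SHA1_HMAC;
+ *	cfg.encap.sport = 5000;
+ *	cfg.encap.dport = 5001;
+ *	cfg.decap.window_size = 64;
+ *
+ *	dtls_dev = nss_dtlsmgr_session_create(&cfg);
+ *	if (!dtls_dev)
+ *		return -EINVAL;
+ */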
+
+/*
+ * nss_dtlsmgr_session_destroy()
+ *	Destroy DTLS session
+ */
+nss_dtlsmgr_status_t nss_dtlsmgr_session_destroy(struct net_device *dev)
+{
+	struct nss_dtlsmgr_netdev_priv *priv;
+	struct nss_dtlsmgr_session *ds;
+	nss_tx_status_t nss_status;
+	struct nss_dtls_msg dtlsmsg;
+
+	priv = netdev_priv(dev);
+	ds = priv->s;
+
+	if (!ds) {
+		return NSS_DTLSMGR_FAIL;
+	}
+
+	/*
+	 * Send DTLS session destroy command to FW
+	 */
+	memset(&dtlsmsg, 0, sizeof(struct nss_dtls_msg));
+	nss_dtls_msg_init(&dtlsmsg, (uint16_t)ds->nss_dtls_if,
+			  NSS_DTLS_MSG_SESSION_DESTROY, 0, NULL, NULL);
+
+	nss_status = nss_dtls_tx_msg_sync(ds->nss_ctx, &dtlsmsg);
+	if (nss_status != NSS_TX_SUCCESS) {
+		nss_dtlsmgr_warn("%px: Failed to send DTLS session destroy for I/F %u", &g_ctx, ds->nss_dtls_if);
+
+		return NSS_DTLSMGR_FAIL;
+	}
+
+	spin_lock_bh(&g_ctx.lock);
+	ds = nss_dtlsmgr_session_remove(ds->nss_dtls_if);
+	spin_unlock_bh(&g_ctx.lock);
+
+	if (!ds) {
+		return NSS_DTLSMGR_FAIL;
+	}
+
+	/*
+	 * Decrement reference count so as to drop it to zero
+	 */
+	nss_dtlsmgr_session_ref_dec(ds);
+
+	nss_dtlsmgr_info("%px: DTLS session I/F %u disabled\n", &g_ctx, ds->nss_dtls_if);
+
+	return NSS_DTLSMGR_OK;
+}
+EXPORT_SYMBOL(nss_dtlsmgr_session_destroy);
+
+/*
+ * nss_dtlsmgr_session_update_decap()
+ *	Update pending decap cipher of a DTLS session.
+ */
+nss_dtlsmgr_status_t nss_dtlsmgr_session_update_decap(struct net_device *dev, struct nss_dtlsmgr_config_update *udata)
+{
+	struct nss_dtlsmgr_netdev_priv *priv;
+	struct nss_dtlsmgr_session *ds;
+	nss_tx_status_t nss_status;
+	nss_crypto_status_t crypto_status;
+	enum nss_dtlsmgr_status ret;
+	struct nss_dtls_msg dtlsmsg;
+	struct nss_dtls_session_cipher_update *update;
+	struct nss_dtlsmgr_crypto_algo *decap_algo;
+
+	priv = netdev_priv(dev);
+	ds = priv->s;
+	if (!ds) {
+		return NSS_DTLSMGR_FAIL;
+	}
+
+	/*
+	 * Free any crypto session allocated for the pending decap
+	 * cipher state by a previous call to this API that was
+	 * subsequently never used for packet processing.
+	 */
+	if (ds->cidx_decap_pending != NSS_CRYPTO_MAX_IDXS) {
+		crypto_status = nss_crypto_session_free(g_ctx.crypto_hdl, ds->cidx_decap_pending);
+		if (crypto_status != NSS_CRYPTO_STATUS_OK) {
+			nss_dtlsmgr_info("%px: dtls I/F:%u, unable to free crypto session id:%d\n",
+					 &g_ctx, ds->nss_dtls_if, ds->cidx_decap_pending);
+		}
+
+		ds->cidx_decap_pending = NSS_CRYPTO_MAX_IDXS;
+	}
+
+	/*
+	 * Alloc crypto session for decap
+	 */
+	ret = nss_dtlsmgr_alloc_crypto(&udata->crypto, &ds->cidx_decap_pending, false);
+	if (ret != NSS_DTLSMGR_OK) {
+		nss_dtlsmgr_info("failed to rekey decap session %d", ret);
+		ds->cidx_decap_pending = NSS_CRYPTO_MAX_IDXS;
+		nss_dtlsmgr_session_ref_dec(ds);
+		return NSS_DTLSMGR_FAIL_NOCRYPTO;
+	}
+
+	decap_algo = &dtlsmgr_algo[udata->crypto.algo];
+
+	/*
+	 * Initialize DTLS session Rx cipher update message
+	 */
+	memset(&dtlsmsg, 0, sizeof(struct nss_dtls_msg));
+	update = &dtlsmsg.msg.cipher_update;
+	update->crypto_idx = ds->cidx_decap_pending;
+	update->epoch = udata->epoch;
+	update->iv_len = decap_algo->iv_len;
+	update->hash_len = decap_algo->hash_len;
+	update->auth_algo = decap_algo->auth_algo;
+	update->cipher_algo = decap_algo->cipher_algo;
+
+	nss_dtls_msg_init(&dtlsmsg, (uint16_t)ds->nss_dtls_if,
+			  NSS_DTLS_MSG_REKEY_DECAP_CIPHER_UPDATE,
+			  sizeof(struct nss_dtls_session_cipher_update), NULL, NULL);
+
+	/*
+	 * Send DTLS session Rx cipher update command to FW
+	 */
+	nss_status = nss_dtls_tx_msg_sync(ds->nss_ctx, &dtlsmsg);
+	if (nss_status != NSS_TX_SUCCESS) {
+		nss_crypto_session_free(g_ctx.crypto_hdl,
+					ds->cidx_decap_pending);
+		ds->cidx_decap_pending = NSS_CRYPTO_MAX_IDXS;
+		nss_dtlsmgr_session_ref_dec(ds);
+		return NSS_DTLSMGR_FAIL;
+	}
+
+	nss_dtlsmgr_session_ref_dec(ds);
+	return NSS_DTLSMGR_OK;
+}
+EXPORT_SYMBOL(nss_dtlsmgr_session_update_decap);
+
+/*
+ * nss_dtlsmgr_session_update_encap()
+ *	Update pending encap cipher of a DTLS session.
+ */
+nss_dtlsmgr_status_t nss_dtlsmgr_session_update_encap(struct net_device *dev, struct nss_dtlsmgr_config_update *udata)
+{
+	struct nss_dtlsmgr_netdev_priv *priv;
+	struct nss_dtlsmgr_session *ds;
+	nss_tx_status_t nss_status;
+	nss_crypto_status_t crypto_status;
+	enum nss_dtlsmgr_status ret;
+	struct nss_dtls_msg dtlsmsg;
+	struct nss_dtls_session_cipher_update *update;
+	struct nss_dtlsmgr_crypto_algo *encap_algo;
+
+	priv = netdev_priv(dev);
+	ds = priv->s;
+	if (!ds) {
+		return NSS_DTLSMGR_FAIL;
+	}
+
+	/*
+	 * Free any crypto session allocated for the pending encap
+	 * cipher state by a previous call to this API that was
+	 * subsequently never used for packet processing.
+	 */
+	if (ds->cidx_encap_pending != NSS_CRYPTO_MAX_IDXS) {
+		crypto_status = nss_crypto_session_free(g_ctx.crypto_hdl,
+							ds->cidx_encap_pending);
+		if (crypto_status != NSS_CRYPTO_STATUS_OK) {
+			nss_dtlsmgr_info("%px: dtls I/F:%u, unable to free crypto session id:%d\n", &g_ctx, ds->nss_dtls_if, ds->cidx_encap_pending);
+		}
+
+		ds->cidx_encap_pending = NSS_CRYPTO_MAX_IDXS;
+	}
+
+	/*
+	 * Alloc crypto session for encap
+	 */
+	ret = nss_dtlsmgr_alloc_crypto(&udata->crypto, &ds->cidx_encap_pending, true);
+	if (ret != NSS_DTLSMGR_OK) {
+		nss_dtlsmgr_info("failed to rekey encap session %d", ret);
+		ds->cidx_encap_pending = NSS_CRYPTO_MAX_IDXS;
+		nss_dtlsmgr_session_ref_dec(ds);
+		return NSS_DTLSMGR_FAIL_NOCRYPTO;
+	}
+
+	encap_algo = &dtlsmgr_algo[udata->crypto.algo];
+
+	/*
+	 * Initialize DTLS session Tx cipher update message
+	 */
+	memset(&dtlsmsg, 0, sizeof(struct nss_dtls_msg));
+	update = &dtlsmsg.msg.cipher_update;
+	update->crypto_idx = ds->cidx_encap_pending;
+	update->epoch = udata->epoch;
+	update->iv_len = encap_algo->iv_len;
+	update->hash_len = encap_algo->hash_len;
+	update->auth_algo = encap_algo->auth_algo;
+	update->cipher_algo = encap_algo->cipher_algo;
+
+	nss_dtls_msg_init(&dtlsmsg, (uint16_t)ds->nss_dtls_if,
+			  NSS_DTLS_MSG_REKEY_ENCAP_CIPHER_UPDATE,
+			  sizeof(struct nss_dtls_session_cipher_update), NULL, NULL);
+
+	/*
+	 * Send DTLS session Tx cipher update command to FW
+	 */
+	nss_status = nss_dtls_tx_msg_sync(ds->nss_ctx, &dtlsmsg);
+	if (nss_status != NSS_TX_SUCCESS) {
+		nss_crypto_session_free(g_ctx.crypto_hdl,
+					ds->cidx_encap_pending);
+		ds->cidx_encap_pending = NSS_CRYPTO_MAX_IDXS;
+		nss_dtlsmgr_session_ref_dec(ds);
+		return NSS_DTLSMGR_FAIL;
+	}
+
+	nss_dtlsmgr_session_ref_dec(ds);
+	return NSS_DTLSMGR_OK;
+}
+EXPORT_SYMBOL(nss_dtlsmgr_session_update_encap);
+
+/*
+ * nss_dtlsmgr_session_switch_decap()
+ *	Set pending decap cipher state of a DTLS session to current.
+ */
+bool nss_dtlsmgr_session_switch_decap(struct net_device *dev)
+{
+	struct nss_dtlsmgr_netdev_priv *priv;
+	struct nss_dtlsmgr_session *ds;
+	nss_tx_status_t nss_status;
+	struct nss_dtls_msg dtlsmsg;
+
+	priv = netdev_priv(dev);
+	ds = priv->s;
+	if (!ds) {
+		return false;
+	}
+
+	memset(&dtlsmsg, 0, sizeof(struct nss_dtls_msg));
+	nss_dtls_msg_init(&dtlsmsg, (uint16_t)ds->nss_dtls_if,
+			  NSS_DTLS_MSG_REKEY_DECAP_CIPHER_SWITCH,
+			  0, NULL, NULL);
+
+	/*
+	 * Send DTLS session Rx cipher switch command to FW
+	 */
+	nss_status = nss_dtls_tx_msg_sync(ds->nss_ctx, &dtlsmsg);
+	if (nss_status != NSS_TX_SUCCESS) {
+		nss_dtlsmgr_session_ref_dec(ds);
+		return false;
+	}
+
+	ds->crypto_idx_decap = ds->cidx_decap_pending;
+	ds->cidx_decap_pending = NSS_CRYPTO_MAX_IDXS;
+
+	nss_dtlsmgr_session_ref_dec(ds);
+	return true;
+}
+EXPORT_SYMBOL(nss_dtlsmgr_session_switch_decap);
+
+/*
+ * nss_dtlsmgr_session_switch_encap()
+ *	Set pending encap cipher state of a DTLS session to current.
+ */
+bool nss_dtlsmgr_session_switch_encap(struct net_device *dev)
+{
+	struct nss_dtlsmgr_netdev_priv *priv;
+	struct nss_dtlsmgr_session *ds;
+	nss_tx_status_t nss_status;
+	struct nss_dtls_msg dtlsmsg;
+
+	priv = netdev_priv(dev);
+	ds = priv->s;
+	if (!ds) {
+		return false;
+	}
+
+	memset(&dtlsmsg, 0, sizeof(struct nss_dtls_msg));
+	nss_dtls_msg_init(&dtlsmsg, (uint16_t)ds->nss_dtls_if,
+			  NSS_DTLS_MSG_REKEY_ENCAP_CIPHER_SWITCH,
+			  0, NULL, NULL);
+
+	/*
+	 * Send DTLS session Tx cipher switch command to FW
+	 */
+	nss_status = nss_dtls_tx_msg_sync(ds->nss_ctx, &dtlsmsg);
+	if (nss_status != NSS_TX_SUCCESS) {
+		nss_dtlsmgr_session_ref_dec(ds);
+		return false;
+	}
+
+	ds->crypto_idx_encap = ds->cidx_encap_pending;
+	ds->cidx_encap_pending = NSS_CRYPTO_MAX_IDXS;
+
+	nss_dtlsmgr_session_ref_dec(ds);
+	return true;
+}
+EXPORT_SYMBOL(nss_dtlsmgr_session_switch_encap);
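+
+/*
+ * Rekey sequence sketch (illustrative only). The update/switch pairing
+ * above implies a two-step rekey: push the pending cipher state first,
+ * then switch it to current; new_epoch and the key material are assumed.
+ *
+ *	struct nss_dtlsmgr_config_update udata = {0};
+ *
+ *	udata.crypto.algo = NSS_DTLSMGR_ALGO_AES_CBC_SHA256_HMAC;
+ *	udata.epoch = new_epoch;
+ *
+ *	if (nss_dtlsmgr_session_update_encap(dtls_dev, &udata) != NSS_DTLSMGR_OK)
+ *		return -EIO;
+ *	if (!nss_dtlsmgr_session_switch_encap(dtls_dev))
+ *		return -EIO;
+ *
+ * The decap side follows the same pattern with
+ * nss_dtlsmgr_session_update_decap()/nss_dtlsmgr_session_switch_decap().
+ */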
+
+/*
+ * nss_dtlsmgr_get_interface()
+ *	Returns NSS DTLS interface number for encap/decap on success.
+ */
+int32_t nss_dtlsmgr_get_interface(struct net_device *dev, enum nss_dtlsmgr_interface_type type)
+{
+	int32_t ifnum;
+
+	if (type > NSS_DTLSMGR_INTERFACE_TYPE_MAX) {
+		nss_dtlsmgr_warn("%px: invalid interface type %d", dev, type);
+		return -EINVAL;
+	}
+
+	ifnum = nss_cmn_get_interface_number_by_dev_and_type(dev, NSS_DYNAMIC_INTERFACE_TYPE_DTLS);
+	if (ifnum < 0) {
+		nss_dtlsmgr_warn("%px: couldn't find DTLS interface number (%d)", dev, ifnum);
+		return ifnum;
+	}
+
+	ifnum = nss_dtls_get_ifnum_with_coreid(ifnum);
+
+	return ifnum;
+}
+EXPORT_SYMBOL(nss_dtlsmgr_get_interface);
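+
+/*
+ * Usage sketch (illustrative only): resolving the NSS interface number of
+ * a DTLS netdevice, e.g. for use as a nexthop when a connection rule is
+ * created elsewhere; "type" is one of enum nss_dtlsmgr_interface_type.
+ *
+ *	int32_t ifnum = nss_dtlsmgr_get_interface(dtls_dev, type);
+ *	if (ifnum < 0)
+ *		return ifnum;
+ */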
+
+/*
+ * nss_dtls_crypto_attach()
+ */
+static nss_crypto_user_ctx_t nss_dtls_crypto_attach(nss_crypto_handle_t crypto)
+{
+	struct nss_dtlsmgr_ctx *sc = &g_ctx;
+
+	sc->crypto_hdl = crypto;
+	nss_dtlsmgr_info("%px: DTLS client crypto attach\n", &g_ctx);
+	return (nss_crypto_user_ctx_t)sc;
+}
+
+/*
+ * nss_dtls_crypto_detach()
+ */
+static void nss_dtls_crypto_detach(nss_crypto_user_ctx_t uctx)
+{
+	struct nss_dtlsmgr_ctx *sc = NULL;
+
+	sc = (struct nss_dtlsmgr_ctx *)uctx;
+	nss_dtlsmgr_assert(sc == &g_ctx);
+
+	sc->crypto_hdl = NULL;
+	nss_dtlsmgr_info("%px: DTLS client crypto detach\n", &g_ctx);
+}
+
+/*
+ * nss_dtls_init_module()
+ */
+int __init nss_dtls_init_module(void)
+{
+	int32_t i;
+
+	nss_dtlsmgr_info("%px: NSS DTLS Manager\n", &g_ctx);
+
+	for (i = 0; i < NSS_MAX_DTLS_SESSIONS; i++) {
+		g_ctx.session[i] = NULL;
+	}
+
+	spin_lock_init(&(g_ctx.lock));
+	nss_crypto_register_user(nss_dtls_crypto_attach,
+				 nss_dtls_crypto_detach,
+				 "nss-dtls");
+	return 0;
+}
+
+/*
+ * nss_dtls_destroy_all_sessions()
+ */
+static void nss_dtls_destroy_all_sessions(void)
+{
+	nss_tx_status_t nss_status;
+	struct nss_dtls_msg dtlsmsg;
+	struct nss_dtlsmgr_session *ds;
+	int32_t i;
+
+	for (i = 0; i < NSS_MAX_DTLS_SESSIONS; i++) {
+		spin_lock_bh(&g_ctx.lock);
+		if (g_ctx.session[i] == NULL) {
+			spin_unlock_bh(&g_ctx.lock);
+			continue;
+		}
+
+		/*
+		 * Remove session from list of sessions
+		 */
+		ds = g_ctx.session[i];
+		g_ctx.session[i] = NULL;
+		spin_unlock_bh(&g_ctx.lock);
+
+		nss_dtlsmgr_assert(ds->magic == NSS_DTLSMGR_SESSION_MAGIC);
+
+		/*
+		 * Send DTLS session destroy command to FW
+		 */
+		memset(&dtlsmsg, 0, sizeof(struct nss_dtls_msg));
+		nss_dtls_msg_init(&dtlsmsg, (uint16_t)ds->nss_dtls_if,
+				  NSS_DTLS_MSG_SESSION_DESTROY, 0,
+				  NULL, NULL);
+
+		nss_status = nss_dtls_tx_msg_sync(ds->nss_ctx, &dtlsmsg);
+		if (nss_status != NSS_TX_SUCCESS)
+			nss_dtlsmgr_warn("%px: Failed to send DTLS session destroy for I/F %u\n", &g_ctx, ds->nss_dtls_if);
+
+		nss_dtlsmgr_session_ref_dec(ds);
+	}
+}
+
+/*
+ * nss_dtls_exit_module()
+ */
+void __exit nss_dtls_exit_module(void)
+{
+	nss_dtls_destroy_all_sessions();
+	nss_crypto_unregister_user(g_ctx.crypto_hdl);
+}
+
+module_init(nss_dtls_init_module);
+module_exit(nss_dtls_exit_module);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("NSS DTLS manager");
diff --git a/qca-nss-clients/dtls/v1.0/nss_connmgr_dtls.h b/qca-nss-clients/dtls/v1.0/nss_connmgr_dtls.h
new file mode 100644
index 0000000..b116c86
--- /dev/null
+++ b/qca-nss-clients/dtls/v1.0/nss_connmgr_dtls.h
@@ -0,0 +1,215 @@
+/*
+ **************************************************************************
+ * Copyright (c) 2016, The Linux Foundation. All rights reserved.
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all copies.
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ **************************************************************************
+ */
+
+/*
+ * nss_connmgr_dtls.h
+ */
+
+#ifndef _NSS_CONNMGR_DTLS_H_
+#define _NSS_CONNMGR_DTLS_H_
+
+#include <nss_dtlsmgr.h>
+#include <nss_crypto_if.h>
+
+/*
+ * Debug macros
+ */
+#if (NSS_DTLSMGR_DEBUG_LEVEL < 1)
+#define nss_dtlsmgr_assert(fmt, args...)
+#else
+#define nss_dtlsmgr_assert(c) BUG_ON(!(c))
+#endif
+
+#if defined(CONFIG_DYNAMIC_DEBUG)
+/*
+ * Compile messages for dynamic enable/disable
+ */
+#define nss_dtlsmgr_warn(s, ...) pr_debug("%s[%d]:" s, __func__, \
+					  __LINE__, ##__VA_ARGS__)
+#define nss_dtlsmgr_info(s, ...) pr_debug("%s[%d]:" s, __func__, \
+					  __LINE__, ##__VA_ARGS__)
+#define nss_dtlsmgr_trace(s, ...) pr_debug("%s[%d]:" s, __func__, \
+					   __LINE__, ##__VA_ARGS__)
+#else
+
+/*
+ * Statically compile messages at different levels
+ */
+#if (NSS_DTLSMGR_DEBUG_LEVEL < 2)
+#define nss_dtlsmgr_warn(s, ...)
+#else
+#define nss_dtlsmgr_warn(s, ...) pr_warn("%s[%d]:" s, __func__, \
+					 __LINE__, ##__VA_ARGS__)
+#endif
+
+#if (NSS_DTLSMGR_DEBUG_LEVEL < 3)
+#define nss_dtlsmgr_info(s, ...)
+#else
+#define nss_dtlsmgr_info(s, ...) pr_notice("%s[%d]:" s, __func__, \
+					   __LINE__, ##__VA_ARGS__)
+#endif
+
+#if (NSS_DTLSMGR_DEBUG_LEVEL < 4)
+#define nss_dtlsmgr_trace(s, ...)
+#else
+#define nss_dtlsmgr_trace(s, ...)  pr_info("%s[%d]:" s, __func__, \
+					   __LINE__, ##__VA_ARGS__)
+#endif
+#endif
+
+#define NSS_DTLSMGR_HDR_LEN 13			/* DTLS header length */
+#define NSS_DTLSMGR_CAPWAPHDR_LEN 4		/* CAPWAP-DTLS header length */
+#define NSS_DTLSMGR_SESSION_MAGIC 0x5d7eb219	/* DTLS session magic value */
+
+/*
+ * DTLS payload content type
+ */
+#define NSS_DTLSMGR_CTYPE_APP 23		/* Application data */
+
+/*
+ * DTLS metadata
+ */
+#define NSS_DTLSMGR_METADATA_LEN 4		/* DTLS metadata length */
+#define NSS_DTLSMGR_METADATA_CTYPE(m) ((m) >> 24)	/* DTLS metadata content type */
+#define NSS_DTLSMGR_METADATA_ERROR(m) (((m) >> 16) & 0x00FF)
+						/* DTLS metadata error */
+/*
+ * DTLS metadata error types
+ */
+#define NSS_DTLSMGR_METADATA_ERROR_OK 0
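+
+/*
+ * Illustrative decode of the 4-byte metadata word the firmware prepends
+ * to decapsulated packets (this mirrors the checks performed in
+ * nss_connmgr_dtls_data_receive()):
+ *
+ *	uint32_t meta = ntohl(*(__be32 *)skb->data);
+ *
+ *	if (NSS_DTLSMGR_METADATA_CTYPE(meta) != NSS_DTLSMGR_CTYPE_APP)
+ *		goto drop;	(not application data)
+ *	if (NSS_DTLSMGR_METADATA_ERROR(meta) != NSS_DTLSMGR_METADATA_ERROR_OK)
+ *		goto drop;	(firmware flagged an error)
+ *	skb_pull(skb, NSS_DTLSMGR_METADATA_LEN);
+ */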
+
+/**
+ * NSS DTLS crypto data
+ */
+struct nss_dtlsmgr_crypto_algo {
+	enum nss_crypto_cipher cipher_algo;
+	enum nss_crypto_auth auth_algo;
+	uint32_t iv_len;
+	uint32_t hash_len;
+};
+
+/**
+ * NSS DTLS session stats update
+ */
+struct nss_dtlsmgr_session_stats_update {
+	uint32_t tx_pkts;		/**< Tx packets */
+	uint32_t rx_pkts;		/**< Rx packets */
+	uint32_t rx_dropped;		/**< Rx drops */
+	uint32_t tx_auth_done;		/**< Tx authentication done */
+	uint32_t rx_auth_done;		/**< Rx successful authentication */
+	uint32_t tx_cipher_done;	/**< Tx cipher done */
+	uint32_t rx_cipher_done;	/**< Rx cipher done */
+	uint32_t tx_cbuf_alloc_fail;	/**< Tx crypto buffer allocation fail */
+	uint32_t rx_cbuf_alloc_fail;	/**< Rx crypto buffer allocation fail */
+	uint32_t tx_cenqueue_fail;	/**< Tx crypto enqueue fail */
+	uint32_t rx_cenqueue_fail;	/**< Rx crypto enqueue fail */
+	uint32_t tx_dropped_hroom;	/**< Tx drop due to
+					     insufficient headroom */
+	uint32_t tx_dropped_troom;	/**< Tx drop due to
+					     insufficient tailroom */
+	uint32_t tx_forward_enqueue_fail;
+					/**< Enqueue failed to forwarding
+					     node after encap */
+	uint32_t rx_forward_enqueue_fail;
+					/**< Enqueue failed to receiving
+					     node after decap */
+	uint32_t rx_invalid_version;	/**< Rx invalid DTLS version */
+	uint32_t rx_invalid_epoch;	/**< Rx invalid DTLS epoch */
+	uint32_t rx_malformed;		/**< Rx malformed DTLS record */
+	uint32_t rx_cipher_fail;	/**< Rx cipher fail */
+	uint32_t rx_auth_fail;		/**< Rx authentication fail */
+	uint32_t rx_capwap_classify_fail;
+					/**< Rx CAPWAP classification fail */
+	uint32_t rx_replay_fail;	/**< Rx anti-replay failures */
+	uint32_t rx_replay_duplicate;	/**< Rx anti-replay fail for
+					     duplicate record */
+	uint32_t rx_replay_out_of_window;
+					/**< Rx anti-replay fail for out
+					     of window record */
+	uint32_t outflow_queue_full;	/**< Tx drop due to encap queue full */
+	uint32_t decap_queue_full;	/**< Rx drop due to decap queue full */
+	uint32_t pbuf_alloc_fail;	/**< Buffer allocation fail */
+	uint32_t pbuf_copy_fail;	/**< Buffer copy fail */
+	uint16_t epoch;			/**< Current Epoch */
+	uint16_t tx_seq_high;		/**< Upper 16-bits of current
+					     sequence number */
+	uint32_t tx_seq_low;		/**< Lower 32-bits of current
+					     sequence number */
+};
+
+/**
+ * NSS DTLS session stats update callback
+ */
+typedef void (*nss_dtlsmgr_session_stats_update_cb_t)(uint32_t dtls_if, struct nss_dtlsmgr_session_stats_update *supdate);
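+
+/*
+ * Callback sketch (illustrative only, not part of this driver): a minimal
+ * stats consumer matching the typedef above.
+ *
+ *	static void my_dtls_stats_cb(uint32_t dtls_if,
+ *				     struct nss_dtlsmgr_session_stats_update *s)
+ *	{
+ *		pr_info("dtls %u: tx %u rx %u dropped %u epoch %u\n",
+ *			dtls_if, s->tx_pkts, s->rx_pkts, s->rx_dropped, s->epoch);
+ *	}
+ */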
+
+/**
+ * @brief IPv4/IPv6 address
+ */
+union nss_dtlsmgr_ip {
+	uint32_t ipv4;			/**< IPv4 address */
+	uint32_t ipv6[4];		/**< IPv6 address */
+};
+
+/*
+ * DTLS Manager session
+ */
+struct nss_dtlsmgr_session {
+	uint32_t magic;			/* Magic value used to
+					   verify DTLS session */
+	atomic_t ref;			/* Reference counter */
+	uint32_t ver;			/* DTLS version */
+	uint32_t flags;			/* Session flags */
+	uint32_t crypto_idx_encap;	/* Current encap crypto session idx */
+	uint32_t crypto_idx_decap;	/* Current decap crypto session idx */
+	uint32_t cidx_encap_pending;	/* Pending encap crypto session idx */
+	uint32_t cidx_decap_pending;	/* Pending decap crypto session idx */
+	nss_dtlsmgr_session_stats_update_cb_t stats_update_cb;
+					/* Callback for Stats update */
+	uint32_t nss_dtls_if;		/* NSS DTLS session I/F */
+	struct nss_ctx_instance *nss_ctx;
+					/* NSS context */
+	struct net_device *netdev;	/* Netdevice */
+	uint16_t sport;			/* Source UDP/UDPLite port */
+	uint16_t dport;			/* Destination UDP/UDPLite port */
+	uint16_t window_size;		/* Anti-replay window size */
+	uint16_t epoch;			/* Current Epoch */
+	union nss_dtlsmgr_ip sip;	/* Source IPv4/IPv6 address */
+	union nss_dtlsmgr_ip dip;	/* Destination IPv4/IPv6 address */
+	uint32_t nss_app_if;		/* NSS I/F of application using
+					   this DTLS session */
+	uint8_t ip_ttl;			/* IP Time To Live */
+};
+
+/*
+ * DTLS Manager global context type
+ */
+struct nss_dtlsmgr_ctx {
+	nss_crypto_handle_t crypto_hdl;
+	spinlock_t lock;
+	struct nss_dtlsmgr_session *session[NSS_MAX_DTLS_SESSIONS];
+};
+
+/*
+ * DTLS Manager per session netdev private data
+ */
+struct nss_dtlsmgr_netdev_priv {
+	struct nss_dtlsmgr_session *s;
+};
+
+nss_dtlsmgr_status_t nss_dtlsmgr_netdev_create(struct nss_dtlsmgr_session *ds);
+nss_dtlsmgr_status_t nss_dtlsmgr_netdev_destroy(struct nss_dtlsmgr_session *ds);
+
+#endif
diff --git a/qca-nss-clients/dtls/v1.0/nss_connmgr_dtls_netdev.c b/qca-nss-clients/dtls/v1.0/nss_connmgr_dtls_netdev.c
new file mode 100644
index 0000000..06cdbe3
--- /dev/null
+++ b/qca-nss-clients/dtls/v1.0/nss_connmgr_dtls_netdev.c
@@ -0,0 +1,222 @@
+/*
+ **************************************************************************
+ * Copyright (c) 2016, 2020, The Linux Foundation. All rights reserved.
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all copies.
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ **************************************************************************
+ */
+
+/*
+ * nss_connmgr_dtls_netdev.c
+ *	NSS DTLS Manager netdev module
+ */
+
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <net/ipv6.h>
+#include <linux/if_arp.h>
+#include <linux/etherdevice.h>
+#include <linux/udp.h>
+#include <linux/ipv6.h>
+
+#include <nss_api_if.h>
+#include <nss_dynamic_interface.h>
+
+#include "nss_connmgr_dtls.h"
+
+/*
+ * Maximum tailroom required by crypto
+ */
+#define NSS_DTLSMGR_TROOM (128 + (2 * NSS_CRYPTO_MAX_HASHLEN_SHA256))
+
+/*
+ * Maximum headroom for encapsulating headers
+ */
+#define NSS_DTLSMGR_MAX_HDR_LEN ((NSS_DTLSMGR_HDR_LEN + 3)	\
+				 + NSS_DTLSMGR_CAPWAPHDR_LEN	\
+				 + (2 * NSS_CRYPTO_MAX_IVLEN_AES)	\
+				 + sizeof(struct ipv6hdr)	\
+				 + sizeof(struct udphdr))
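+
+/*
+ * Illustrative worst case (assuming a 16-byte AES IV): 16 (DTLS header
+ * rounded up to a 4-byte boundary) + 4 (CAPWAP) + 32 (2 * IV) +
+ * 40 (IPv6) + 8 (UDP) = 100 bytes of headroom.
+ */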
+
+/*
+ * nss_dtlsmgr_session_xmit()
+ */
+static netdev_tx_t nss_dtlsmgr_session_xmit(struct sk_buff *skb,
+					    struct net_device *dev)
+{
+	struct nss_dtlsmgr_netdev_priv *priv;
+	struct nss_dtlsmgr_session *s;
+	int32_t  nhead, ntail;
+
+	priv = netdev_priv(dev);
+	s = priv->s;
+
+	switch (skb->protocol) {
+	case htons(ETH_P_IP):
+		if (s->flags & NSS_DTLSMGR_HDR_IPV6) {
+			nss_dtlsmgr_info("%px: NSS DTLS I/F %d: skb(%px) invalid L3 protocol 0x%x\n", dev, s->nss_dtls_if, skb, ETH_P_IP);
+			return NETDEV_TX_BUSY;
+		}
+		break;
+
+	case htons(ETH_P_IPV6):
+		if (!(s->flags & NSS_DTLSMGR_HDR_IPV6)) {
+			nss_dtlsmgr_info("%px: NSS DTLS I/F %d: skb(%px) invalid L3 protocol 0x%x\n", dev, s->nss_dtls_if, skb, ETH_P_IPV6);
+			return NETDEV_TX_BUSY;
+		}
+		break;
+
+	default:
+		nss_dtlsmgr_info("%px: NSS DTLS I/F %d: skb(%px) unsupported IP protocol 0x%x\n", dev, s->nss_dtls_if, skb, ntohs(skb->protocol));
+		return NETDEV_TX_BUSY;
+	}
+
+	nhead = dev->needed_headroom;
+	ntail = dev->needed_tailroom;
+
+	if (skb_is_nonlinear(skb)) {
+		nss_dtlsmgr_info("%px: NSS DTLS does not support non-linear skb %px\n", dev, skb);
+		return NETDEV_TX_BUSY;
+	}
+
+	if (unlikely(skb_shared(skb))) {
+		nss_dtlsmgr_info("%px: Shared skb:%px is not supported\n",
+				 dev, skb);
+		return NETDEV_TX_BUSY;
+	}
+
+	if (skb_cloned(skb) || (skb_headroom(skb) < nhead)
+	    || (skb_tailroom(skb) < ntail)) {
+		if (pskb_expand_head(skb, nhead, ntail, GFP_ATOMIC)) {
+			nss_dtlsmgr_info("%px: skb:%px unable to expand buffer\n",
+					 dev, skb);
+			return NETDEV_TX_BUSY;
+		}
+	}
+
+	if (skb->data != skb_network_header(skb)) {
+		skb_pull(skb, skb_network_offset(skb));
+	}
+
+	if (nss_dtls_tx_buf(skb, s->nss_dtls_if, s->nss_ctx) != NSS_TX_SUCCESS) {
+		return NETDEV_TX_BUSY;
+	}
+
+	return NETDEV_TX_OK;
+}
+
+/*
+ * nss_dtlsmgr_session_stop()
+ */
+static int nss_dtlsmgr_session_stop(struct net_device *dev)
+{
+	netif_stop_queue(dev);
+	return 0;
+}
+
+/*
+ * nss_dtlsmgr_session_open()
+ */
+static int nss_dtlsmgr_session_open(struct net_device *dev)
+{
+	netif_start_queue(dev);
+	return 0;
+}
+
+/*
+ * DTLS netdev ops
+ */
+static const struct net_device_ops nss_dtlsmgr_session_ops = {
+	.ndo_start_xmit = nss_dtlsmgr_session_xmit,
+	.ndo_open = nss_dtlsmgr_session_open,
+	.ndo_stop = nss_dtlsmgr_session_stop,
+	.ndo_set_mac_address = eth_mac_addr,
+};
+
+/*
+ * nss_dtlsmgr_dev_setup()
+ */
+static void nss_dtlsmgr_dev_setup(struct net_device *dev)
+{
+	dev->addr_len = ETH_ALEN;
+	dev->mtu = ETH_DATA_LEN;
+	dev->hard_header_len = NSS_DTLSMGR_MAX_HDR_LEN;
+	dev->needed_headroom = 0;
+	dev->needed_tailroom = NSS_DTLSMGR_TROOM;
+
+	dev->type = ARPHRD_ETHER;
+	dev->ethtool_ops = NULL;
+	dev->header_ops = NULL;
+	dev->netdev_ops = &nss_dtlsmgr_session_ops;
+	dev->destructor = NULL;
+
+	memcpy(dev->dev_addr, "\xaa\xbb\xcc\xdd\xee\xff", dev->addr_len);
+	memset(dev->broadcast, 0xff, dev->addr_len);
+	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
+}
+
+/*
+ * nss_dtlsmgr_netdev_create()
+ */
+nss_dtlsmgr_status_t nss_dtlsmgr_netdev_create(struct nss_dtlsmgr_session *ds)
+{
+	struct net_device *dev;
+	struct nss_dtlsmgr_netdev_priv *priv;
+	int32_t err = 0;
+
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(3, 16, 0))
+	dev = alloc_netdev(sizeof(struct nss_dtlsmgr_netdev_priv),
+			   "qca-nss-dtls%d", nss_dtlsmgr_dev_setup);
+#else
+	dev = alloc_netdev(sizeof(struct nss_dtlsmgr_netdev_priv),
+			   "qca-nss-dtls%d", NET_NAME_UNKNOWN,
+			   nss_dtlsmgr_dev_setup);
+#endif
+
+	if (!dev) {
+		nss_dtlsmgr_info("DTLS netdev alloc failed\n");
+		return NSS_DTLSMGR_FAIL;
+	}
+
+	priv = netdev_priv(dev);
+	priv->s = ds;
+
+	err = rtnl_is_locked() ? register_netdevice(dev) : register_netdev(dev);
+	if (err < 0) {
+		nss_dtlsmgr_info("DTLS netdev register failed\n");
+		free_netdev(dev);
+		return NSS_DTLSMGR_FAIL;
+	}
+
+	ds->netdev = dev;
+	return NSS_DTLSMGR_OK;
+}
+
+/*
+ * nss_dtlsmgr_netdev_destroy()
+ */
+nss_dtlsmgr_status_t nss_dtlsmgr_netdev_destroy(struct nss_dtlsmgr_session *ds)
+{
+	if (!ds || !ds->netdev) {
+		return NSS_DTLSMGR_FAIL;
+	}
+
+	rtnl_is_locked() ? unregister_netdevice(ds->netdev)
+			 : unregister_netdev(ds->netdev);
+
+	free_netdev(ds->netdev);
+	ds->netdev = NULL;
+	return NSS_DTLSMGR_OK;
+}
diff --git a/qca-nss-clients/dtls/v2.0/Makefile b/qca-nss-clients/dtls/v2.0/Makefile
new file mode 100644
index 0000000..adf87de
--- /dev/null
+++ b/qca-nss-clients/dtls/v2.0/Makefile
@@ -0,0 +1,12 @@
+# Makefile for DTLS manager
+
+ccflags-y += $(NSS_CCFLAGS) -I$(obj)/../../exports
+ccflags-y += -DNSS_DTLSMGR_DEBUG_LEVEL=0
+ccflags-y += -DNSS_DTLSMGR_BUILD_ID=\"'Build_ID - $(shell date +'%m/%d/%y, %H:%M:%S') SoC=$(SoC)'\"
+ccflags-y += -Wall -Werror
+
+obj-m += qca-nss-dtlsmgr.o
+qca-nss-dtlsmgr-objs += nss_dtlsmgr.o
+qca-nss-dtlsmgr-objs += nss_dtlsmgr_ctx.o
+qca-nss-dtlsmgr-objs += nss_dtlsmgr_ctx_dev.o
+qca-nss-dtlsmgr-objs += nss_dtlsmgr_ctx_stats.o
diff --git a/qca-nss-clients/dtls/v2.0/nss_dtlsmgr.c b/qca-nss-clients/dtls/v2.0/nss_dtlsmgr.c
new file mode 100644
index 0000000..3285fc1
--- /dev/null
+++ b/qca-nss-clients/dtls/v2.0/nss_dtlsmgr.c
@@ -0,0 +1,177 @@
+/*
+ **************************************************************************
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all copies.
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ **************************************************************************
+ */
+
+/*
+ * nss_dtlsmgr.c
+ *	NSS DTLS Manager
+ */
+
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/crypto.h>
+#include <linux/debugfs.h>
+#include <linux/rtnetlink.h>
+#include <net/ipv6.h>
+#include <linux/if_arp.h>
+#include <linux/etherdevice.h>
+#include <linux/atomic.h>
+
+#include <crypto/algapi.h>
+#include <crypto/aead.h>
+#include <crypto/aes.h>
+#include <crypto/authenc.h>
+#include <crypto/des.h>
+#include <crypto/sha.h>
+#include <crypto/skcipher.h>
+#include <crypto/hash.h>
+
+#include <nss_api_if.h>
+#include <nss_dynamic_interface.h>
+
+#include <nss_cryptoapi.h>
+#include <nss_dtls_cmn.h>
+#include <nss_dtlsmgr.h>
+
+#include "nss_dtlsmgr_private.h"
+
+/*
+ * Global DTLS context
+ */
+struct nss_dtlsmgr g_dtls = {0};
+
+/*
+ * nss_dtlsmgr_node_configure_done()
+ *	Check and set the configured flag if the DTLS firmware package is successfully configured.
+ */
+static void nss_dtlsmgr_node_configure_done(void *app_data, struct nss_cmn_msg *ncm)
+{
+	struct nss_dtlsmgr *drv = app_data;
+
+	nss_dtlsmgr_info("%px: configure node(%u) response(%d) error(%d)\n", drv,
+			 ncm->interface, ncm->response, ncm->error);
+	atomic_cmpxchg(&drv->is_configured, false, (ncm->response == NSS_CMN_RESPONSE_ACK) ||
+			(ncm->error == NSS_DTLS_CMN_ERROR_ALREADY_CONFIGURED));
+}
+
+/*
+ * nss_dtlsmgr_node_configure()
+ *	Send a configure message to the DTLS firmware package.
+ */
+static void nss_dtlsmgr_node_configure(struct nss_dtlsmgr *drv, uint32_t if_num)
+{
+	struct nss_dtls_cmn_msg ndcm = {0};
+	nss_tx_status_t nss_status;
+
+	/*
+	 * Send DTLS configure message to NSS
+	 */
+	nss_dtls_cmn_msg_init(&ndcm, if_num, NSS_DTLS_CMN_MSG_TYPE_CONFIGURE_NODE, 0,
+			      nss_dtlsmgr_node_configure_done, drv);
+
+	nss_status = nss_dtls_cmn_tx_msg(drv->nss_ctx, &ndcm);
+	if (nss_status != NSS_TX_SUCCESS) {
+		nss_dtlsmgr_warn("%px: unable to send node configure (%u)\n", drv, if_num);
+		return;
+	}
+}
+
+/*
+ * nss_dtlsmgr_rx_event()
+ *	Handle the response notification from the firmware.
+ */
+static void nss_dtlsmgr_rx_event(void *app_data, struct nss_cmn_msg *ncm)
+{
+	struct nss_dtlsmgr *drv = app_data;
+
+	nss_dtlsmgr_trace("%px: received Node stats sync:%u\n", drv, ncm->interface);
+
+	/*
+	 * The firmware DTLS node should not configure its DMA rings until
+	 * the hardware has been fully configured by the host crypto driver.
+	 * Since the firmware boots independently of the host, there is no
+	 * guarantee that the DMA will be ready for configuration while the
+	 * firmware is configuring itself. Our approach is therefore to
+	 * notify the firmware to set up its DMA only after the host is
+	 * ready. Here we rely on a simple module load order where the DTLS
+	 * manager loads after the crypto driver. The expectation is that
+	 * the crypto driver configures the hardware correctly before the
+	 * modules that depend on it get the chance to load. Once the DTLS
+	 * configuration is done, we avoid configuring the DMA again.
+	 *
+	 * TODO: Multiple items
+	 * - Use a nss_cryptoapi_xxx API to detect whether the crypto is ready.
+	 * - Add a mechanism to switch to a pointer for first-time configuration.
+	 * - Add support for node stats, even though the node stats are already
+	 *   captured in the NSS driver.
+	 */
+	if (!atomic_read(&drv->is_configured)) {
+		nss_dtlsmgr_node_configure(drv, ncm->interface);
+		return;
+	}
+}
+
+/*
+ * nss_dtlsmgr_init_module()
+ *	Initialize the DTLS manager module.
+ */
+int __init nss_dtlsmgr_init_module(void)
+{
+	struct nss_dtlsmgr *drv = &g_dtls;
+
+	atomic_set(&drv->is_configured, false);
+
+	nss_dtlsmgr_trace("registering for base interface(%u)", NSS_DTLS_INTERFACE);
+	drv->nss_ctx = nss_dtls_cmn_notify_register(NSS_DTLS_INTERFACE, nss_dtlsmgr_rx_event, drv);
+	if (!drv->nss_ctx) {
+		nss_dtlsmgr_warn("%px: DTLS NSS context instance is NULL", drv);
+		return -ENODEV;
+	}
+
+	drv->root_dir = debugfs_create_dir("qca-nss-dtlsmgr", NULL);
+	if (!drv->root_dir)
+		nss_dtlsmgr_warn("Failed to create debugfs directory");
+
+	pr_info("qca-nss-dtlsmgr module loaded (%s)\n", NSS_DTLSMGR_BUILD_ID);
+	return 0;
+}
+
+/*
+ * nss_dtlsmgr_exit_module()
+ *	Remove the DTLS manager module.
+ */
+void __exit nss_dtlsmgr_exit_module(void)
+{
+	struct nss_dtlsmgr *drv = &g_dtls;
+
+	nss_dtls_cmn_notify_unregister(NSS_DTLS_INTERFACE);
+
+	debugfs_remove_recursive(drv->root_dir);
+
+	atomic_set(&drv->is_configured, false);
+
+	nss_dtlsmgr_info("dtls manager is unloaded");
+}
+
+module_init(nss_dtlsmgr_init_module);
+module_exit(nss_dtlsmgr_exit_module);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("NSS DTLS manager");
diff --git a/qca-nss-clients/dtls/v2.0/nss_dtlsmgr_ctx.c b/qca-nss-clients/dtls/v2.0/nss_dtlsmgr_ctx.c
new file mode 100644
index 0000000..f8432e0
--- /dev/null
+++ b/qca-nss-clients/dtls/v2.0/nss_dtlsmgr_ctx.c
@@ -0,0 +1,879 @@
+/*
+ **************************************************************************
+ * Copyright (c) 2017-2020, The Linux Foundation. All rights reserved.
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all copies.
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ **************************************************************************
+ */
+
+/*
+ * nss_dtlsmgr_ctx.c
+ *	NSS DTLS Manager Context
+ */
+
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <linux/crypto.h>
+#include <linux/debugfs.h>
+#include <linux/rtnetlink.h>
+#include <net/ipv6.h>
+#include <net/vxlan.h>
+#include <linux/if_arp.h>
+#include <linux/etherdevice.h>
+#include <linux/atomic.h>
+#include <asm/cmpxchg.h>
+
+#include <crypto/algapi.h>
+#include <crypto/aead.h>
+#include <crypto/aes.h>
+#include <crypto/authenc.h>
+#include <crypto/des.h>
+#include <crypto/sha.h>
+#include <crypto/skcipher.h>
+#include <crypto/hash.h>
+
+#include <nss_api_if.h>
+#include <nss_dynamic_interface.h>
+
+#include <nss_cryptoapi.h>
+#include <nss_dtls_cmn.h>
+#include <nss_dtlsmgr.h>
+
+#include "nss_dtlsmgr_private.h"
+
+#define NSS_DTLSMGR_KEY_PARAM_SIZE RTA_SPACE(sizeof(struct crypto_authenc_key_param))
+
+extern struct nss_dtlsmgr g_dtls;
+
+static struct nss_dtlsmgr_algo_info dtlsmgr_algo_info[NSS_DTLSMGR_ALGO_MAX] = {
+	{"echainiv(authenc(hmac(sha1),cbc(aes)))", NSS_DTLSMGR_KEY_PARAM_SIZE},
+	{"echainiv(authenc(hmac(sha256),cbc(aes)))", NSS_DTLSMGR_KEY_PARAM_SIZE},
+	{"echainiv(authenc(hmac(sha1),cbc(des3_ede)))", NSS_DTLSMGR_KEY_PARAM_SIZE},
+	{"echainiv(authenc(hmac(sha256),cbc(des3_ede)))", NSS_DTLSMGR_KEY_PARAM_SIZE},
+	{"rfc4106(gcm(aes))", 0}
+};
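+
+/*
+ * Note: this table is indexed by the algorithm value supplied in
+ * struct nss_dtlsmgr_crypto (see the range check in the allocator below).
+ */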
+
+/*
+ * nss_dtlsmgr_ctx_alloc_crypto()
+ *	Allocate a crypto session through Linux CryptoAPI framework.
+ */
+static int nss_dtlsmgr_ctx_alloc_crypto(struct nss_dtlsmgr_ctx *ctx, struct nss_dtlsmgr_dtls_data *dtls,
+					struct nss_dtlsmgr_crypto *crypto)
+{
+	struct crypto_authenc_key_param *key_param;
+	struct nss_dtlsmgr_algo_info *info;
+	struct rtattr *rta;
+	char *keys, *p;
+	uint16_t keylen;
+
+	if (crypto->algo >= ARRAY_SIZE(dtlsmgr_algo_info)) {
+		nss_dtlsmgr_warn("%px: invalid crypto algorithm", ctx);
+		return -EINVAL;
+	}
+
+	info = &dtlsmgr_algo_info[crypto->algo];
+	dtls->aead = crypto_alloc_aead(info->name, 0, 0);
+	if (IS_ERR(dtls->aead)) {
+		nss_dtlsmgr_warn("%px: failed to allocate crypto aead context", ctx);
+		return -ENOMEM;
+	}
+
+	nss_dtlsmgr_trace("cipher_keylen:%d auth_keylen:%d nonce_len:%d\n",
+			  crypto->cipher_key.len, crypto->auth_key.len, crypto->nonce.len);
+
+	/*
+	 * Construct keys
+	 */
+	keylen = info->rta_key_size;
+	keylen += crypto->cipher_key.len;
+	keylen += crypto->auth_key.len;
+	keylen += crypto->nonce.len;
+
+	keys = vzalloc(keylen);
+	if (!keys) {
+		nss_dtlsmgr_warn("%px: failed to allocate key memory", ctx);
+		crypto_free_aead(dtls->aead);
+		return -ENOMEM;
+	}
+
+	if (crypto->algo == NSS_DTLSMGR_ALGO_AES_GCM) {
+		memcpy(keys, crypto->cipher_key.data, crypto->cipher_key.len);
+		/* Copy nonce after the key */
+		memcpy(keys + crypto->cipher_key.len, crypto->nonce.data, crypto->nonce.len);
+		goto setkey;
+	}
+
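+	/*
+	 * For the authenc() algorithms the key blob handed to
+	 * crypto_aead_setkey() follows the Linux authenc key format:
+	 * an rtattr carrying the cipher key length, followed by the
+	 * authentication key and then the cipher key.
+	 */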
+	p = keys;
+	rta = (void *)p;
+	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
+	rta->rta_len = RTA_LENGTH(sizeof(*key_param));
+	key_param = RTA_DATA(rta);
+	p += RTA_SPACE(sizeof(*key_param));
+
+	/*
+	 * Copy authentication key
+	 */
+	memcpy(p, crypto->auth_key.data, crypto->auth_key.len);
+	p += crypto->auth_key.len;
+
+	/*
+	 * Copy cipher Key
+	 */
+	key_param->enckeylen = cpu_to_be32(crypto->cipher_key.len);
+	memcpy(p, crypto->cipher_key.data, crypto->cipher_key.len);
+
+setkey:
+
+	if (crypto_aead_setkey(dtls->aead, keys, keylen)) {
+		nss_dtlsmgr_warn("%px: failed to configure keys", ctx);
+		vfree(keys);
+		crypto_free_aead(dtls->aead);
+		return -ENOSPC;
+	}
+
+	nss_cryptoapi_aead_ctx2session(dtls->aead, &dtls->crypto_idx);
+	dtls->blk_len = (uint8_t)crypto_aead_blocksize(dtls->aead);
+	dtls->hash_len = (uint8_t)crypto_aead_authsize(dtls->aead);
+	dtls->iv_len = (uint8_t)crypto_aead_ivsize(dtls->aead);
+
+	vfree(keys);
+	return 0;
+}
+
+/*
+ * nss_dtlsmgr_ctx_alloc_dtls()
+ *	Allocate a DTLS session.
+ */
+static struct nss_dtlsmgr_dtls_data *nss_dtlsmgr_ctx_alloc_dtls(struct nss_dtlsmgr_ctx *ctx,
+								struct nss_dtlsmgr_ctx_data *data,
+								struct nss_dtlsmgr_crypto *crypto)
+{
+	struct nss_dtlsmgr_dtls_data *dtls;
+	int error;
+
+	nss_dtlsmgr_trace("%px: allocating context data(%u)", ctx, data->di_type);
+
+	dtls = vzalloc(sizeof(*dtls));
+	if (!dtls) {
+		nss_dtlsmgr_warn("%px: failed to allocate dtls data(%u)", ctx, data->di_type);
+		return NULL;
+	}
+
+	INIT_LIST_HEAD(&dtls->list);
+
+	error = nss_dtlsmgr_ctx_alloc_crypto(ctx, dtls, crypto);
+	if (error < 0) {
+		nss_dtlsmgr_warn("%px: unable to allocate crypto(%u) - error(%d)", ctx, data->di_type, error);
+		vfree(dtls);
+		return NULL;
+	}
+
+	nss_dtlsmgr_trace("%px: crypto_aead allocated", ctx);
+	return dtls;
+}
+
+/*
+ * nss_dtlsmgr_ctx_free_dtls()
+ *	Free the DTLS context.
+ */
+static void nss_dtlsmgr_ctx_free_dtls(struct nss_dtlsmgr_dtls_data *dtls)
+{
+	crypto_free_aead(dtls->aead);
+	vfree(dtls);
+}
+
+/*
+ * nss_dtlsmgr_ctx_configure_hdr()
+ *	Configure the DTLS header related information.
+ */
+static bool nss_dtlsmgr_ctx_configure_hdr(struct nss_dtlsmgr_ctx_data *data)
+{
+	const uint32_t type = NSS_DTLS_CMN_MSG_TYPE_CONFIGURE_HDR;
+	enum nss_dtls_cmn_error resp = NSS_DTLS_CMN_ERROR_NONE;
+	struct nss_dtls_cmn_ctx_config_hdr *cfg;
+	struct nss_dtls_cmn_msg ndcm = { {0} };
+	nss_tx_status_t status;
+	uint32_t mask = 0;
+
+	BUG_ON(in_atomic());
+
+	mask |= NSS_DTLS_CMN_CTX_HDR_IPV6;
+	mask |= NSS_DTLS_CMN_CTX_HDR_UDPLITE;
+	mask |= NSS_DTLS_CMN_CTX_HDR_CAPWAP;
+	mask |= NSS_DTLS_CMN_CTX_CIPHER_MODE_GCM;
+	mask |= NSS_DTLS_CMN_CTX_ENCAP_UDPLITE_CSUM;
+	mask |= NSS_DTLS_CMN_CTX_ENCAP_METADATA;
+	mask |= NSS_DTLS_CMN_CTX_DECAP_ACCEPT_ALL;
+
+	cfg = &ndcm.msg.hdr_cfg;
+	cfg->flags = data->flags & mask;
+	cfg->dest_ifnum = data->dest_ifnum;
+	cfg->src_ifnum = data->src_ifnum;
+
+	memcpy(cfg->sip, data->flow.sip, sizeof(cfg->sip));
+	memcpy(cfg->dip, data->flow.dip, sizeof(cfg->dip));
+
+	cfg->sport = data->flow.sport;
+	cfg->dport = data->flow.dport;
+	cfg->hop_limit_ttl = data->flow.hop_limit_ttl;
+	cfg->dscp = data->flow.dscp;
+	cfg->dscp_copy = data->flow.dscp_copy;
+	cfg->df = data->flow.df;
+
+	nss_dtlsmgr_trace("flags:0x%x dest_ifnum:0x%x src_ifnum:0x%x sport:0x%x dport:0x%x sip:0x%x dip:0x%x",
+			  cfg->flags, cfg->dest_ifnum, cfg->src_ifnum, cfg->sport, cfg->dport,
+			  cfg->sip[0], cfg->dip[0]);
+
+	status = nss_dtls_cmn_tx_msg_sync(data->nss_ctx, data->ifnum, type, sizeof(*cfg), &ndcm, &resp);
+	if (status != NSS_TX_SUCCESS) {
+		nss_dtlsmgr_warn("%px: msg_sync failed, if_num(%u), status(%d), type(%d), resp(%d)",
+				 data, data->ifnum, status, type, resp);
+		return false;
+	}
+
+	return true;
+}
+
+/*
+ * nss_dtlsmgr_ctx_configure_dtls()
+ *	Configure the DTLS version, crypto related data, window size and epoch.
+ */
+static bool nss_dtlsmgr_ctx_configure_dtls(struct nss_dtlsmgr_ctx_data *data, struct nss_dtlsmgr_dtls_data *dtls)
+{
+	const uint32_t type = NSS_DTLS_CMN_MSG_TYPE_CONFIGURE_DTLS;
+	enum nss_dtls_cmn_error resp = NSS_DTLS_CMN_ERROR_NONE;
+	struct nss_dtls_cmn_ctx_config_dtls *cfg;
+	struct nss_dtls_cmn_msg ndcm = {0};
+	nss_tx_status_t status;
+
+	BUG_ON(in_atomic());
+
+	cfg = &ndcm.msg.dtls_cfg;
+	cfg->ver = dtls->ver;
+	cfg->crypto_idx = dtls->crypto_idx;
+	cfg->epoch = dtls->epoch;
+	cfg->window_size = dtls->window_size;
+	cfg->iv_len = dtls->iv_len;
+	cfg->hash_len = dtls->hash_len;
+	cfg->blk_len = dtls->blk_len;
+
+	status = nss_dtls_cmn_tx_msg_sync(data->nss_ctx, data->ifnum, type, sizeof(*cfg), &ndcm, &resp);
+	if (status != NSS_TX_SUCCESS) {
+		nss_dtlsmgr_warn("%px: msg_sync failed, if_num(%u), status(%d), type(%d), resp(%d)",
+				data, data->ifnum, status, type, resp);
+		return false;
+	}
+
+	return true;
+}
+
+/*
+ * nss_dtlsmgr_ctx_deconfigure()
+ *	Deconfigure the DTLS context and free all the related data.
+ */
+static bool nss_dtlsmgr_ctx_deconfigure(struct nss_dtlsmgr_ctx *ctx, struct nss_dtlsmgr_ctx_data *data)
+{
+	const uint32_t type = NSS_DTLS_CMN_MSG_TYPE_DECONFIGURE;
+	enum nss_dtls_cmn_error resp = NSS_DTLS_CMN_ERROR_NONE;
+	struct nss_dtls_cmn_msg ndcm = {0};
+	struct nss_dtlsmgr_dtls_data *cur;
+	nss_tx_status_t status;
+
+	status = nss_dtls_cmn_tx_msg_sync(data->nss_ctx, data->ifnum, type, 0, &ndcm, &resp);
+	if (status != NSS_TX_SUCCESS) {
+		nss_dtlsmgr_warn("%px: msg_sync failed, if_num(%u), status(%d), type(%d), resp(%d)",
+				ctx, data->ifnum, status, type, resp);
+		return false;
+	}
+
+	nss_dtls_cmn_unregister_if(data->ifnum);
+
+	for (;;) {
+		write_lock(&ctx->lock);
+		cur = list_first_entry_or_null(&data->dtls_active, struct nss_dtlsmgr_dtls_data, list);
+		if (!cur) {
+			write_unlock(&ctx->lock);
+			break;
+		}
+
+		list_del(&cur->list);
+		write_unlock(&ctx->lock);
+		nss_dtlsmgr_ctx_free_dtls(cur);
+	}
+
+	status = nss_dynamic_interface_dealloc_node(data->ifnum, data->di_type);
+	if (status != NSS_TX_SUCCESS) {
+		nss_dtlsmgr_warn("%px: failed to deallocate dynamic(%d) interface(%u)", ctx, data->di_type, data->ifnum);
+		return false;
+	}
+
+	data->ifnum = -1;
+	return true;
+}
+
+/*
+ * nss_dtlsmgr_ctx_create_encap()
+ *	Create DTLS encapsulation dynamic interface and configure the DTLS context.
+ */
+static int nss_dtlsmgr_ctx_create_encap(struct nss_dtlsmgr_ctx *ctx, uint32_t ifnum,
+					uint32_t src_ifnum, struct nss_dtlsmgr_config *ndc)
+{
+	struct nss_dtlsmgr_encap_config *cfg = &ndc->encap;
+	struct nss_dtlsmgr_ctx_data *data = &ctx->encap;
+	struct nss_dtlsmgr_flow_data *flow = &data->flow;
+	struct nss_dtlsmgr_dtls_data *dtls;
+
+	dtls = nss_dtlsmgr_ctx_alloc_dtls(ctx, &ctx->encap, &cfg->crypto);
+	if (!dtls) {
+		nss_dtlsmgr_warn("%px: unable to allocate encap context data", ctx);
+		return -ENOMEM;
+	}
+
+	INIT_LIST_HEAD(&data->dtls_active);
+
+	data->di_type = NSS_DYNAMIC_INTERFACE_TYPE_DTLS_CMN_INNER;
+	data->ifnum = ifnum;
+	data->src_ifnum = src_ifnum;
+	data->flags = ndc->flags & (NSS_DTLSMGR_HDR_MASK | NSS_DTLSMGR_CRYPTO_MASK | NSS_DTLSMGR_ENCAP_MASK);
+	data->tailroom = dtls->blk_len + dtls->hash_len;
+	data->headroom = dtls->iv_len;
+
+	memcpy(&flow->sip, cfg->sip, sizeof(flow->sip));
+	memcpy(&flow->dip, cfg->dip, sizeof(flow->dip));
+
+	flow->sport = cfg->sport;
+	flow->dport = cfg->dport;
+	flow->dscp = cfg->dscp;
+	flow->dscp_copy = cfg->dscp_copy;
+	flow->df = cfg->df;
+	flow->hop_limit_ttl = cfg->ip_ttl;
+
+	dtls->epoch = cfg->epoch;
+	dtls->ver = cfg->ver;
+
+	data->headroom += NSS_DTLSMGR_DTLS_HDR_SZ;
+
+	/*
+	 * We need to provide the firmware with the source and
+	 * destination interface numbers. This allows it to work
+	 * with dynamically created interfaces.
+	 */
+	switch (data->flags & (NSS_DTLSMGR_HDR_IPV6 | NSS_DTLSMGR_HDR_CAPWAP)) {
+	case NSS_DTLSMGR_HDR_IPV6 | NSS_DTLSMGR_HDR_CAPWAP:
+		data->dest_ifnum = NSS_IPV6_RX_INTERFACE;
+		data->headroom += sizeof(struct ipv6hdr);
+		data->headroom += NSS_DTLSMGR_CAPWAP_DTLS_HDR_SZ;
+		data->headroom += NSS_DTLSMGR_SGT_HDR_SZ;
+		break;
+	case NSS_DTLSMGR_HDR_IPV6:
+		data->dest_ifnum = NSS_IPV6_RX_INTERFACE;
+		data->headroom += sizeof(struct ipv6hdr);
+		break;
+	case NSS_DTLSMGR_HDR_CAPWAP:
+		data->dest_ifnum = NSS_IPV4_RX_INTERFACE;
+		data->headroom += sizeof(struct iphdr);
+		data->headroom += NSS_DTLSMGR_CAPWAP_DTLS_HDR_SZ;
+		data->headroom += NSS_DTLSMGR_SGT_HDR_SZ;
+		break;
+	default:
+		data->dest_ifnum = NSS_IPV4_RX_INTERFACE;
+		data->headroom += sizeof(struct iphdr);
+		break;
+	}
+
+	/*
+	 * Header size is same for UDP and UDPLite
+	 */
+	data->headroom += sizeof(struct ethhdr) + sizeof(struct vlan_hdr) + sizeof(struct udphdr);
+
+	nss_dtlsmgr_trace("%px: encap ifnum(%u), src(%u), dest(0x%x)", ctx, data->ifnum,
+			  data->src_ifnum, data->dest_ifnum);
+
+	/*
+	 * Register NSS DTLS Encap I/F
+	 */
+	data->nss_ctx = nss_dtls_cmn_register_if(data->ifnum,
+						nss_dtlsmgr_ctx_dev_rx_inner,
+						nss_dtlsmgr_ctx_dev_event_inner,
+						ctx->dev,
+						0,
+						data->di_type,
+						(void *)data);
+	if (!data->nss_ctx) {
+		nss_dtlsmgr_warn("%px: NSS register interface(%u) failed", ctx, data->ifnum);
+		nss_dtlsmgr_ctx_free_dtls(dtls);
+		return -ENODEV;
+	}
+
+	if (!nss_dtlsmgr_ctx_configure_hdr(data)) {
+		nss_dtlsmgr_warn("%px: unable to configure(%d) hdr", ctx, data->di_type);
+		goto fail;
+	}
+
+	if (!nss_dtlsmgr_ctx_configure_dtls(data, dtls)) {
+		nss_dtlsmgr_warn("%px: unable to configure(%d) dtls", ctx, data->di_type);
+		goto fail;
+	}
+
+	write_lock(&ctx->lock);
+	list_add(&dtls->list, &data->dtls_active);
+	write_unlock(&ctx->lock);
+
+	return 0;
+fail:
+	nss_dtls_cmn_unregister_if(data->ifnum);
+	nss_dtlsmgr_ctx_free_dtls(dtls);
+	return -EBUSY;
+}
+
+/*
+ * nss_dtlsmgr_ctx_create_decap()
+ *	Create DTLS decapsulation dynamic interface and configure the DTLS context.
+ */
+static int nss_dtlsmgr_ctx_create_decap(struct nss_dtlsmgr_ctx *ctx, uint32_t ifnum, uint32_t src_ifnum,
+					struct nss_dtlsmgr_config *cfg)
+{
+	struct nss_dtlsmgr_ctx_data *data = &ctx->decap;
+	struct nss_dtlsmgr_dtls_data *dtls;
+
+	dtls = nss_dtlsmgr_ctx_alloc_dtls(ctx, &ctx->decap, &cfg->decap.crypto);
+	if (!dtls) {
+		nss_dtlsmgr_warn("%px: unable to allocate decap context data", ctx);
+		return -ENOMEM;
+	}
+
+	INIT_LIST_HEAD(&data->dtls_active);
+
+	data->di_type = NSS_DYNAMIC_INTERFACE_TYPE_DTLS_CMN_OUTER;
+	data->ifnum = ifnum;
+
+	/*
+	 * We need to provide the firmware with the source and
+	 * destination interface numbers. This allows it to work
+	 * with dynamically created interfaces.
+	 */
+	data->src_ifnum = src_ifnum;
+	data->dest_ifnum = cfg->decap.nexthop_ifnum;
+	data->tailroom = data->headroom = 0;
+	data->flags = cfg->flags & (NSS_DTLSMGR_HDR_MASK | NSS_DTLSMGR_CRYPTO_MASK | NSS_DTLSMGR_DECAP_MASK);
+
+	nss_dtlsmgr_trace("%px: decap ifnum(%u), src(%u), dest(%u)", ctx, data->ifnum,
+			  data->src_ifnum, data->dest_ifnum);
+
+	dtls->window_size = cfg->decap.window_size;
+	dtls->ver = cfg->encap.ver;
+
+	/*
+	 * Register NSS DTLS Decap I/F
+	 */
+	data->nss_ctx = nss_dtls_cmn_register_if(data->ifnum,
+						nss_dtlsmgr_ctx_dev_rx_outer,
+						nss_dtlsmgr_ctx_dev_event_outer,
+						ctx->dev,
+						0,
+						data->di_type,
+						(void *)data);
+	if (!data->nss_ctx) {
+		nss_dtlsmgr_warn("%px: NSS register interface(%u) failed", ctx, data->ifnum);
+		nss_dtlsmgr_ctx_free_dtls(dtls);
+		return -ENODEV;
+	}
+
+	if (!nss_dtlsmgr_ctx_configure_hdr(data)) {
+		nss_dtlsmgr_warn("%px: unable to configure(%d) hdr", ctx, data->di_type);
+		goto fail;
+	}
+
+	if (!nss_dtlsmgr_ctx_configure_dtls(data, dtls)) {
+		nss_dtlsmgr_warn("%px: unable to configure(%d) dtls", ctx, data->di_type);
+		goto fail;
+	}
+
+	write_lock(&ctx->lock);
+	list_add(&dtls->list, &data->dtls_active);
+	write_unlock(&ctx->lock);
+
+	return 0;
+fail:
+	nss_dtls_cmn_unregister_if(data->ifnum);
+	nss_dtlsmgr_ctx_free_dtls(dtls);
+	return -EBUSY;
+}
+
+/*
+ * nss_dtlsmgr_session_switch()
+ *	Send a switch message to firmware to use new cipher spec
+ *
+ * Note: This deletes the older cipher spec and pops the next cipher spec
+ * for use.
+ */
+static bool nss_dtlsmgr_session_switch(struct nss_dtlsmgr_ctx *ctx, struct nss_dtlsmgr_ctx_data *data)
+{
+	const uint32_t type = NSS_DTLS_CMN_MSG_TYPE_SWITCH_DTLS;
+	enum nss_dtls_cmn_error resp = NSS_DTLS_CMN_ERROR_NONE;
+	struct nss_dtls_cmn_msg ndcm = {0};
+	struct nss_dtlsmgr_dtls_data *dtls;
+	nss_tx_status_t status;
+
+	BUG_ON(in_atomic());
+
+	/*
+	 * TODO: Add retry messaging so that, on failures due to queue-full
+	 * conditions, we attempt a few retries before aborting.
+	 */
+	status = nss_dtls_cmn_tx_msg_sync(data->nss_ctx, data->ifnum, type, 0, &ndcm, &resp);
+	if (status != NSS_TX_SUCCESS) {
+		nss_dtlsmgr_warn("%px: msg_sync failed, if_num(%u), status(%d), type(%d), resp(%d)",
+				ctx, data->ifnum, status, type, resp);
+		return false;
+	}
+
+	/*
+	 * We essentially pop the head of the dtls list.
+	 * It is expected that an update should have already
+	 * added a new dtls entry at the tail of the list
+	 */
+	write_lock(&ctx->lock);
+	dtls = list_first_entry_or_null(&data->dtls_active, struct nss_dtlsmgr_dtls_data, list);
+	if (!dtls) {
+		write_unlock(&ctx->lock);
+		return false;
+	}
+
+	list_del(&dtls->list);
+	write_unlock(&ctx->lock);
+
+	nss_dtlsmgr_ctx_free_dtls(dtls);
+	return true;
+}
+
+/*
+ * nss_dtlsmgr_session_create()
+ *	Create DTLS session and associated crypto sessions.
+ */
+struct net_device *nss_dtlsmgr_session_create(struct nss_dtlsmgr_config *cfg)
+{
+	struct nss_dtlsmgr *drv = &g_dtls;
+	struct nss_dtlsmgr_ctx *ctx = NULL;
+	struct net_device *dev;
+	int32_t encap_ifnum;
+	int32_t decap_ifnum;
+	int error;
+
+	if (!atomic_read(&drv->is_configured)) {
+		nss_dtlsmgr_warn("%px: dtls firmware not ready", drv);
+		return NULL;
+	}
+
+	if ((cfg->encap.ver != NSS_DTLSMGR_VERSION_1_0) && (cfg->encap.ver != NSS_DTLSMGR_VERSION_1_2)) {
+		nss_dtlsmgr_warn("%px: invalid encapsulation version(%d)", drv, cfg->encap.ver);
+		return NULL;
+	}
+
+	encap_ifnum = nss_dynamic_interface_alloc_node(NSS_DYNAMIC_INTERFACE_TYPE_DTLS_CMN_INNER);
+	if (encap_ifnum < 0) {
+		nss_dtlsmgr_warn("%px: failed to allocate encap dynamic interface(%d)", drv, encap_ifnum);
+		return NULL;
+	}
+
+	decap_ifnum = nss_dynamic_interface_alloc_node(NSS_DYNAMIC_INTERFACE_TYPE_DTLS_CMN_OUTER);
+	if (decap_ifnum < 0) {
+		nss_dtlsmgr_warn("%px: failed to allocate decap dynamic interface(%d)", drv, decap_ifnum);
+		goto dealloc_encap_node;
+	}
+
+	nss_dtlsmgr_trace("dynamic interfaces, encap(%u), decap(%u)", encap_ifnum, decap_ifnum);
+
+	dev = alloc_netdev(sizeof(*ctx), "dtls%d", NET_NAME_ENUM, nss_dtlsmgr_ctx_dev_setup);
+	if (!dev) {
+		nss_dtlsmgr_warn("%px: unable to allocate dtls device", ctx);
+		goto dealloc_decap_node;
+	}
+
+	ctx = netdev_priv(dev);
+	ctx->dev = dev;
+	rwlock_init(&ctx->lock);
+
+	NSS_DTLSMGR_SET_MAGIC(ctx, NSS_DTLSMGR_CTX_MAGIC);
+
+	error = nss_dtlsmgr_ctx_create_encap(ctx, encap_ifnum, decap_ifnum, cfg);
+	if (error < 0) {
+		nss_dtlsmgr_warn("%px: unable to create encap context, error(%d)", ctx, error);
+		goto free_dev;
+	}
+
+	error = nss_dtlsmgr_ctx_create_decap(ctx, decap_ifnum, encap_ifnum, cfg);
+	if (error < 0) {
+		nss_dtlsmgr_warn("%px: unable to create decap context, error(%d)", ctx, error);
+		goto destroy_encap;
+	}
+
+	/*
+	 * Set the needed headroom and tailroom as a multiple of 4 bytes
+	 * so that the skb data pointer remains 4 byte aligned when the
+	 * headroom/tailroom is adjusted.
+	 */
+	dev->needed_headroom = NSS_DTLSMGR_NEEDED_HEADROOM_SZ;
+	dev->needed_tailroom = NSS_DTLSMGR_NEEDED_TAILROOM_SZ;
+
+	ctx->app_data = cfg->app_data;
+	ctx->notify_cb = cfg->notify;
+	ctx->data_cb = cfg->data;
+
+	/*
+	 * If the user has not provided a data callback, register the
+	 * default callback handler.
+	 */
+	if (!ctx->data_cb) {
+		ctx->data_cb = nss_dtlsmgr_ctx_dev_data_callback;
+		ctx->app_data = ctx;
+	}
+
+	error = register_netdev(dev);
+	if (error < 0) {
+		nss_dtlsmgr_warn("%px: unable to register net_device(%s)", ctx, dev->name);
+		goto destroy_decap;
+	}
+
+	dev->mtu = dev->mtu - (ctx->encap.headroom + ctx->encap.tailroom);
+
+	nss_dtlsmgr_trace("%px: dtls session(%s) created, encap(%u), decap(%u)",
+			  ctx, dev->name, ctx->encap.ifnum, ctx->decap.ifnum);
+
+	if (nss_dtlsmgr_create_debugfs(ctx)) {
+		nss_dtlsmgr_warn("Failed to create debugfs for ctx(%px)", ctx);
+	}
+
+	return dev;
+
+destroy_decap:
+	nss_dtlsmgr_ctx_deconfigure(ctx, &ctx->decap);
+
+destroy_encap:
+	nss_dtlsmgr_ctx_deconfigure(ctx, &ctx->encap);
+
+free_dev:
+	free_netdev(dev);
+
+dealloc_decap_node:
+	nss_dynamic_interface_dealloc_node(decap_ifnum, NSS_DYNAMIC_INTERFACE_TYPE_DTLS_CMN_OUTER);
+
+dealloc_encap_node:
+	nss_dynamic_interface_dealloc_node(encap_ifnum, NSS_DYNAMIC_INTERFACE_TYPE_DTLS_CMN_INNER);
+	return NULL;
+}
+EXPORT_SYMBOL(nss_dtlsmgr_session_create);
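+
+/*
+ * Illustrative caller sketch (not part of this patch): the fields shown
+ * are those consumed above; the values and the "nexthop", "sport" and
+ * "dport" variables are placeholders.
+ *
+ *	struct nss_dtlsmgr_config cfg = {0};
+ *	struct net_device *dtls_dev;
+ *
+ *	cfg.flags = NSS_DTLSMGR_HDR_IPV6;
+ *	cfg.encap.ver = NSS_DTLSMGR_VERSION_1_2;
+ *	cfg.encap.sport = sport;
+ *	cfg.encap.dport = dport;
+ *	cfg.decap.window_size = 64;
+ *	cfg.decap.nexthop_ifnum = nexthop;
+ *	... fill cfg.encap.crypto and cfg.decap.crypto keys ...
+ *
+ *	dtls_dev = nss_dtlsmgr_session_create(&cfg);
+ *	if (!dtls_dev)
+ *		return -ENOMEM;
+ */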
+
+/*
+ * nss_dtlsmgr_session_destroy()
+ *	Destroy DTLS session
+ */
+nss_dtlsmgr_status_t nss_dtlsmgr_session_destroy(struct net_device *dev)
+{
+	struct nss_dtlsmgr_ctx *ctx = netdev_priv(dev);
+	NSS_DTLSMGR_VERIFY_MAGIC(ctx);
+
+	/*
+	 * Reset the callback handlers atomically
+	 */
+	xchg(&ctx->notify_cb, NULL);
+	xchg(&ctx->data_cb, NULL);
+
+	nss_dtlsmgr_trace("%px: destroying encap(%u) and decap(%u) sessions",
+			  ctx, ctx->encap.ifnum, ctx->decap.ifnum);
+
+	if (!nss_dtlsmgr_ctx_deconfigure(ctx, &ctx->encap)) {
+		nss_dtlsmgr_warn("%px: unable to deconfigure encap", ctx);
+		return NSS_DTLSMGR_FAIL;
+	}
+
+	if (!nss_dtlsmgr_ctx_deconfigure(ctx, &ctx->decap)) {
+		nss_dtlsmgr_warn("%px: unable to deconfigure decap", ctx);
+		return NSS_DTLSMGR_FAIL;
+	}
+
+	NSS_DTLSMGR_SET_MAGIC(ctx, 0);
+
+	unregister_netdev(dev);
+
+	return NSS_DTLSMGR_OK;
+}
+EXPORT_SYMBOL(nss_dtlsmgr_session_destroy);
+
+/*
+ * nss_dtlsmgr_session_update_encap()
+ *	Update the encapsulation crypto keys.
+ */
+nss_dtlsmgr_status_t nss_dtlsmgr_session_update_encap(struct net_device *dev, struct nss_dtlsmgr_config_update *cfg)
+{
+	struct nss_dtlsmgr_ctx *ctx = netdev_priv(dev);
+	struct nss_dtlsmgr_ctx_data *data = &ctx->encap;
+	struct nss_dtlsmgr_dtls_data *dtls, *prev_dtls;
+
+	NSS_DTLSMGR_VERIFY_MAGIC(ctx);
+
+	dtls = nss_dtlsmgr_ctx_alloc_dtls(ctx, &ctx->encap, &cfg->crypto);
+	if (!dtls) {
+		nss_dtlsmgr_warn("%px: unable to update encap context data", ctx);
+		return NSS_DTLSMGR_FAIL_NOMEM;
+	}
+
+	/*
+	 * Get the first entry in the list to compare the crypto key lengths
+	 */
+	prev_dtls = list_first_entry_or_null(&data->dtls_active, struct nss_dtlsmgr_dtls_data, list);
+	if (!prev_dtls) {
+		nss_dtlsmgr_warn("%px: dtls list is empty\n", ctx);
+		nss_dtlsmgr_ctx_free_dtls(dtls);
+		return NSS_DTLSMGR_FAIL_NOCRYPTO;
+	}
+
+	/*
+	 * The new key lengths must not exceed the previous ones; otherwise
+	 * the reserved headroom and tailroom would be insufficient.
+	 */
+	BUG_ON(prev_dtls->iv_len < dtls->iv_len);
+	BUG_ON(prev_dtls->blk_len < dtls->blk_len);
+	BUG_ON(prev_dtls->hash_len < dtls->hash_len);
+
+	nss_dtlsmgr_trace("%px: encap context update allocated (%u)", ctx, ctx->encap.ifnum);
+
+	dtls->epoch = cfg->epoch;
+	dtls->window_size = cfg->window_size;
+
+	if (!nss_dtlsmgr_ctx_configure_dtls(&ctx->encap, dtls)) {
+		nss_dtlsmgr_warn("%px: unable to configure encap dtls", ctx);
+		nss_dtlsmgr_ctx_free_dtls(dtls);
+		return NSS_DTLSMGR_FAIL_MESSAGE;
+	}
+
+	write_lock(&ctx->lock);
+	list_add_tail(&dtls->list, &ctx->encap.dtls_active);
+	write_unlock(&ctx->lock);
+
+	nss_dtlsmgr_trace("%px: encap context update done", ctx);
+	return NSS_DTLSMGR_OK;
+}
+EXPORT_SYMBOL(nss_dtlsmgr_session_update_encap);
+
+/*
+ * nss_dtlsmgr_session_update_decap()
+ *	Update the decapsulation crypto keys.
+ */
+nss_dtlsmgr_status_t nss_dtlsmgr_session_update_decap(struct net_device *dev, struct nss_dtlsmgr_config_update *cfg)
+{
+	struct nss_dtlsmgr_ctx *ctx = netdev_priv(dev);
+	struct nss_dtlsmgr_dtls_data *dtls;
+
+	NSS_DTLSMGR_VERIFY_MAGIC(ctx);
+
+	dtls = nss_dtlsmgr_ctx_alloc_dtls(ctx, &ctx->decap, &cfg->crypto);
+	if (!dtls) {
+		nss_dtlsmgr_warn("%px: unable to update decap context data", ctx);
+		return NSS_DTLSMGR_FAIL_NOMEM;
+	}
+
+	nss_dtlsmgr_trace("%px: decap context update allocated (%u)", ctx, ctx->decap.ifnum);
+
+	dtls->epoch = cfg->epoch;
+	dtls->window_size = cfg->window_size;
+
+	if (!nss_dtlsmgr_ctx_configure_dtls(&ctx->decap, dtls)) {
+		nss_dtlsmgr_warn("%px: unable to configure decap dtls", ctx);
+		nss_dtlsmgr_ctx_free_dtls(dtls);
+		return NSS_DTLSMGR_FAIL_MESSAGE;
+	}
+
+	write_lock(&ctx->lock);
+	list_add_tail(&dtls->list, &ctx->decap.dtls_active);
+	write_unlock(&ctx->lock);
+
+	nss_dtlsmgr_trace("%px: decap context update done", ctx);
+	return NSS_DTLSMGR_OK;
+}
+EXPORT_SYMBOL(nss_dtlsmgr_session_update_decap);
+
+/*
+ * nss_dtlsmgr_session_switch_encap()
+ *	Send a message to encapsulation DTLS interface to switch to the new crypto keys.
+ */
+bool nss_dtlsmgr_session_switch_encap(struct net_device *dev)
+{
+	struct nss_dtlsmgr_ctx *ctx = netdev_priv(dev);
+	struct nss_dtlsmgr_ctx_data *data = &ctx->encap;
+
+	NSS_DTLSMGR_VERIFY_MAGIC(ctx);
+
+	if (!nss_dtlsmgr_session_switch(ctx, data)) {
+		nss_dtlsmgr_warn("%px: failed to send encap switch_dtls(%u)", ctx, data->ifnum);
+		return false;
+	}
+
+	nss_dtlsmgr_trace("%px: encap(%u) cipher switch done", ctx, data->ifnum);
+	return true;
+}
+EXPORT_SYMBOL(nss_dtlsmgr_session_switch_encap);
+
+/*
+ * nss_dtlsmgr_session_switch_decap()
+ *	Send a message to decapsulation DTLS interface to switch to the new crypto keys.
+ */
+bool nss_dtlsmgr_session_switch_decap(struct net_device *dev)
+{
+	struct nss_dtlsmgr_ctx *ctx = netdev_priv(dev);
+	struct nss_dtlsmgr_ctx_data *data = &ctx->decap;
+
+	NSS_DTLSMGR_VERIFY_MAGIC(ctx);
+
+	if (!nss_dtlsmgr_session_switch(ctx, data)) {
+		nss_dtlsmgr_warn("%px: failed to send decap switch_dtls(%u)", ctx, data->ifnum);
+		return false;
+	}
+
+	nss_dtlsmgr_trace("%px: decap(%u) cipher switch done", ctx, data->ifnum);
+	return true;
+}
+EXPORT_SYMBOL(nss_dtlsmgr_session_switch_decap);
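+
+/*
+ * Illustrative rekey sequence (sketch): stage the new cipher state with
+ * an update, then switch; the switch pops and frees the old cipher spec
+ * at the head of the list.
+ *
+ *	nss_dtlsmgr_session_update_encap(dtls_dev, &update_cfg);
+ *	nss_dtlsmgr_session_switch_encap(dtls_dev);
+ *	nss_dtlsmgr_session_update_decap(dtls_dev, &update_cfg);
+ *	nss_dtlsmgr_session_switch_decap(dtls_dev);
+ */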
+
+/*
+ * nss_dtlsmgr_get_interface()
+ *	Returns NSS DTLS interface number for encap/decap on success.
+ */
+int32_t nss_dtlsmgr_get_interface(struct net_device *dev, enum nss_dtlsmgr_interface_type type)
+{
+	int32_t ifnum;
+
+	switch (type) {
+	case NSS_DTLSMGR_INTERFACE_TYPE_INNER:
+		ifnum = nss_cmn_get_interface_number_by_dev_and_type(dev, NSS_DYNAMIC_INTERFACE_TYPE_DTLS_CMN_INNER);
+		break;
+
+	case NSS_DTLSMGR_INTERFACE_TYPE_OUTER:
+		ifnum = nss_cmn_get_interface_number_by_dev_and_type(dev, NSS_DYNAMIC_INTERFACE_TYPE_DTLS_CMN_OUTER);
+		break;
+
+	default:
+		nss_dtlsmgr_warn("%px: invalid interface type %d", dev, type);
+		return -EINVAL;
+	}
+
+	if (ifnum < 0) {
+		nss_dtlsmgr_warn("%px: couldn't find DTLS interface number (%d)", dev, ifnum);
+		return ifnum;
+	}
+
+	return nss_dtls_cmn_get_ifnum(ifnum);
+}
+EXPORT_SYMBOL(nss_dtlsmgr_get_interface);
diff --git a/qca-nss-clients/dtls/v2.0/nss_dtlsmgr_ctx_dev.c b/qca-nss-clients/dtls/v2.0/nss_dtlsmgr_ctx_dev.c
new file mode 100644
index 0000000..4bb736e
--- /dev/null
+++ b/qca-nss-clients/dtls/v2.0/nss_dtlsmgr_ctx_dev.c
@@ -0,0 +1,532 @@
+/*
+ **************************************************************************
+ * Copyright (c) 2017 - 2018, 2020 The Linux Foundation. All rights reserved.
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all copies.
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ **************************************************************************
+ */
+
+/*
+ * nss_dtlsmgr_ctx_dev.c
+ *	NSS DTLS Manager context device
+ */
+
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/module.h>
+#include <linux/debugfs.h>
+#include <linux/skbuff.h>
+#include <linux/if_arp.h>
+#include <linux/etherdevice.h>
+#include <linux/udp.h>
+#include <linux/ipv6.h>
+#include <net/ip.h>
+#include <net/ip6_route.h>
+#include <net/ipv6.h>
+#include <net/protocol.h>
+#include <net/route.h>
+#include <crypto/aes.h>
+#include <crypto/sha.h>
+
+#include <nss_api_if.h>
+#include <nss_dynamic_interface.h>
+#include <nss_dtls_cmn.h>
+#include <nss_dtlsmgr.h>
+
+#include "nss_dtlsmgr_private.h"
+
+/*
+ * nss_dtlsmgr_ctx_dev_update_stats()
+ *	Update the context statistics and notify the registered callback.
+ */
+static void nss_dtlsmgr_ctx_dev_update_stats(struct nss_dtlsmgr_ctx *ctx,
+				struct nss_dtls_cmn_ctx_stats *msg_stats,
+				struct nss_dtlsmgr_stats *stats,
+				bool encap)
+{
+	int i;
+
+	dev_hold(ctx->dev);
+
+	stats->tx_packets += msg_stats->pkt.tx_packets;
+	stats->tx_bytes += msg_stats->pkt.tx_bytes;
+
+	stats->rx_packets += msg_stats->pkt.rx_packets;
+	stats->rx_bytes += msg_stats->pkt.rx_bytes;
+	stats->rx_dropped += nss_cmn_rx_dropped_sum(&msg_stats->pkt);
+	stats->rx_single_rec += msg_stats->rx_single_rec;
+	stats->rx_multi_rec += msg_stats->rx_multi_rec;
+
+	stats->fail_crypto_resource += msg_stats->fail_crypto_resource;
+	stats->fail_crypto_enqueue += msg_stats->fail_crypto_enqueue;
+	stats->fail_headroom += msg_stats->fail_headroom;
+	stats->fail_tailroom += msg_stats->fail_tailroom;
+	stats->fail_ver += msg_stats->fail_ver;
+	stats->fail_epoch += msg_stats->fail_epoch;
+	stats->fail_dtls_record += msg_stats->fail_dtls_record;
+	stats->fail_capwap += msg_stats->fail_capwap;
+	stats->fail_replay += msg_stats->fail_replay;
+	stats->fail_replay_dup += msg_stats->fail_replay_dup;
+	stats->fail_replay_win += msg_stats->fail_replay_win;
+	stats->fail_queue += msg_stats->fail_queue;
+	stats->fail_queue_nexthop += msg_stats->fail_queue_nexthop;
+	stats->fail_pbuf_alloc += msg_stats->fail_pbuf_alloc;
+	stats->fail_pbuf_linear += msg_stats->fail_pbuf_linear;
+	stats->fail_pbuf_stats += msg_stats->fail_pbuf_stats;
+	stats->fail_pbuf_align += msg_stats->fail_pbuf_align;
+	stats->fail_ctx_active += msg_stats->fail_ctx_active;
+	stats->fail_hwctx_active += msg_stats->fail_hwctx_active;
+	stats->fail_cipher += msg_stats->fail_cipher;
+	stats->fail_auth += msg_stats->fail_auth;
+	stats->fail_seq_ovf += msg_stats->fail_seq_ovf;
+	stats->fail_blk_len += msg_stats->fail_blk_len;
+	stats->fail_hash_len += msg_stats->fail_hash_len;
+
+	stats->fail_hw.len_error += msg_stats->fail_hw.len_error;
+	stats->fail_hw.token_error += msg_stats->fail_hw.token_error;
+	stats->fail_hw.bypass_error += msg_stats->fail_hw.bypass_error;
+	stats->fail_hw.config_error += msg_stats->fail_hw.config_error;
+	stats->fail_hw.algo_error += msg_stats->fail_hw.algo_error;
+	stats->fail_hw.hash_ovf_error += msg_stats->fail_hw.hash_ovf_error;
+	stats->fail_hw.ttl_error += msg_stats->fail_hw.ttl_error;
+	stats->fail_hw.csum_error += msg_stats->fail_hw.csum_error;
+	stats->fail_hw.timeout_error += msg_stats->fail_hw.timeout_error;
+
+	for (i = 0; i < NSS_DTLS_CMN_CLE_MAX; i++)
+		stats->fail_cle[i] += msg_stats->fail_cle[i];
+
+	if (ctx->notify_cb)
+		ctx->notify_cb(ctx->app_data, ctx->dev, stats, encap);
+
+	dev_put(ctx->dev);
+}
+
+/*
+ * nss_dtlsmgr_ctx_dev_event_inner()
+ *	Event handler for DTLS inner interface
+ */
+void nss_dtlsmgr_ctx_dev_event_inner(void *app_data, struct nss_cmn_msg *ncm)
+{
+	struct nss_dtlsmgr_ctx_data *data = (struct nss_dtlsmgr_ctx_data *)app_data;
+	struct nss_dtls_cmn_msg *ndcm = (struct nss_dtls_cmn_msg *)ncm;
+	struct nss_dtls_cmn_ctx_stats *msg_stats = &ndcm->msg.stats;
+	struct nss_dtlsmgr_ctx *ctx;
+
+	if (ncm->type != NSS_DTLS_CMN_MSG_TYPE_SYNC_STATS) {
+		nss_dtlsmgr_warn("%px: unsupported message type(%d)", data, ncm->type);
+		return;
+	}
+
+	ctx = container_of(data, struct nss_dtlsmgr_ctx, encap);
+	NSS_DTLSMGR_VERIFY_MAGIC(ctx);
+
+	nss_dtlsmgr_ctx_dev_update_stats(ctx, msg_stats, &data->stats, true);
+}
+
+/*
+ * nss_dtlsmgr_ctx_dev_event_outer()
+ *	Event handler for DTLS outer interface
+ */
+void nss_dtlsmgr_ctx_dev_event_outer(void *app_data, struct nss_cmn_msg *ncm)
+{
+	struct nss_dtlsmgr_ctx_data *data = (struct nss_dtlsmgr_ctx_data *)app_data;
+	struct nss_dtls_cmn_msg *ndcm = (struct nss_dtls_cmn_msg *)ncm;
+	struct nss_dtls_cmn_ctx_stats *msg_stats = &ndcm->msg.stats;
+	struct nss_dtlsmgr_ctx *ctx;
+
+	if (ncm->type != NSS_DTLS_CMN_MSG_TYPE_SYNC_STATS) {
+		nss_dtlsmgr_warn("%px: unsupported message type(%d)", data, ncm->type);
+		return;
+	}
+
+	ctx = container_of(data, struct nss_dtlsmgr_ctx, decap);
+	NSS_DTLSMGR_VERIFY_MAGIC(ctx);
+
+	nss_dtlsmgr_ctx_dev_update_stats(ctx, msg_stats, &data->stats, false);
+}
+
+/*
+ * nss_dtlsmgr_ctx_dev_data_callback()
+ *	Default callback if the user does not provide one
+ */
+void nss_dtlsmgr_ctx_dev_data_callback(void *app_data, struct sk_buff *skb)
+{
+	struct nss_dtlsmgr_metadata *ndm;
+	struct nss_dtlsmgr_stats *stats;
+	struct nss_dtlsmgr_ctx *ctx;
+
+	ctx = (struct nss_dtlsmgr_ctx *)app_data;
+	NSS_DTLSMGR_VERIFY_MAGIC(ctx);
+
+	stats = &ctx->decap.stats;
+	ndm = (struct nss_dtlsmgr_metadata *)skb->data;
+	if (ndm->result != NSS_DTLSMGR_METADATA_RESULT_OK) {
+		nss_dtlsmgr_warn("%px: DTLS packet has error(s): %d", skb->dev, ndm->result);
+		dev_kfree_skb_any(skb);
+		stats->fail_host_rx++;
+		return;
+	}
+
+	/*
+	 * Remove the DTLS metadata and indicate it up the stack
+	 */
+	skb_pull(skb, sizeof(*ndm));
+	skb_reset_mac_header(skb);
+	skb_reset_network_header(skb);
+
+	/*
+	 * Check IP version to identify if it is an IP packet
+	 */
+	switch (ip_hdr(skb)->version) {
+	case IPVERSION:
+		skb->protocol = htons(ETH_P_IP);
+		skb_set_transport_header(skb, sizeof(struct iphdr));
+		break;
+
+	case 6:
+		skb->protocol = htons(ETH_P_IPV6);
+		skb_set_transport_header(skb, sizeof(struct ipv6hdr));
+		break;
+
+	default:
+		nss_dtlsmgr_trace("%px: non-IP packet received (ifnum:%d)", ctx, ctx->decap.ifnum);
+	}
+
+	netif_receive_skb(skb);
+}
+
+/*
+ * nss_dtlsmgr_ctx_dev_rx_inner()
+ *	Receive and process packet after DTLS decapsulation
+ */
+void nss_dtlsmgr_ctx_dev_rx_inner(struct net_device *dev, struct sk_buff *skb, struct napi_struct *napi)
+{
+	struct nss_dtlsmgr_ctx *ctx;
+	struct nss_dtlsmgr_stats *stats;
+
+	BUG_ON(!dev);
+	BUG_ON(!skb);
+
+	dev_hold(dev);
+
+	ctx = netdev_priv(dev);
+	NSS_DTLSMGR_VERIFY_MAGIC(ctx);
+
+	stats = &ctx->decap.stats;
+
+	nss_dtlsmgr_trace("%px: RX DTLS decapsulated packet, ifnum(%d)", dev, ctx->decap.ifnum);
+
+	skb->pkt_type = PACKET_HOST;
+	skb->skb_iif = dev->ifindex;
+	skb->dev = dev;
+
+	ctx->data_cb(ctx->app_data, skb);
+	dev_put(dev);
+}
+
+/*
+ * nss_dtlsmgr_ctx_dev_rx_outer()
+ *	Receive and process packet from NSS after encapsulation.
+ */
+void nss_dtlsmgr_ctx_dev_rx_outer(struct net_device *dev, struct sk_buff *skb, struct napi_struct *napi)
+{
+	struct nss_dtlsmgr_ctx *ctx;
+	struct nss_dtlsmgr_stats *stats;
+
+	BUG_ON(!dev);
+	BUG_ON(!skb);
+
+	dev_hold(dev);
+
+	ctx = netdev_priv(dev);
+	NSS_DTLSMGR_VERIFY_MAGIC(ctx);
+
+	stats = &ctx->encap.stats;
+
+	nss_dtlsmgr_trace("%px: RX DTLS encapsulated packet, ifnum(%d)", dev, ctx->encap.ifnum);
+
+	skb->pkt_type = PACKET_HOST;
+	skb->skb_iif = dev->ifindex;
+	skb->dev = dev;
+
+	skb_reset_mac_header(skb);
+	skb_reset_network_header(skb);
+
+	/*
+	 * Check IP version to identify if it is an IP packet
+	 */
+	switch (ip_hdr(skb)->version) {
+	case IPVERSION: {
+		struct rtable *rt;
+		struct iphdr *iph;
+
+		skb->protocol = htons(ETH_P_IP);
+		skb_set_transport_header(skb, sizeof(struct iphdr));
+
+		iph = ip_hdr(skb);
+		rt = ip_route_output(&init_net, iph->daddr, iph->saddr, 0, 0);
+		if (IS_ERR(rt)) {
+			nss_dtlsmgr_warn("%px: No IPv4 route or out dev", dev);
+			dev_kfree_skb_any(skb);
+			stats->fail_host_rx++;
+			break;
+		}
+
+		skb_dst_set(skb, &rt->dst);
+		skb->ip_summed = CHECKSUM_COMPLETE;
+		ip_local_out(&init_net, NULL, skb);
+		break;
+	}
+
+	case 6: {
+		struct ipv6hdr *ip6h;
+		struct dst_entry *dst;
+		struct flowi6 fl6;
+
+		skb->protocol = htons(ETH_P_IPV6);
+		skb_set_transport_header(skb, sizeof(struct ipv6hdr));
+
+		ip6h = ipv6_hdr(skb);
+		memset(&fl6, 0, sizeof(fl6));
+		memcpy(&fl6.daddr, &ip6h->daddr, sizeof(fl6.daddr));
+		memcpy(&fl6.saddr, &ip6h->saddr, sizeof(fl6.saddr));
+
+		dst = ip6_route_output(&init_net, NULL, &fl6);
+		if (IS_ERR(dst)) {
+			nss_dtlsmgr_warn("%px: No IPv6 route or out dev", dev);
+			dev_kfree_skb_any(skb);
+			stats->fail_host_rx++;
+			break;
+		}
+
+		skb_dst_set(skb, dst);
+		skb->ip_summed = CHECKSUM_COMPLETE;
+		ip6_local_out(&init_net, NULL, skb);
+		break;
+	}
+
+	default:
+		/*
+		 * A non-IP packet cannot be routed from here,
+		 * so it has to be dropped.
+		 */
+		nss_dtlsmgr_trace("%px: received non-IP packet", ctx);
+		dev_kfree_skb_any(skb);
+		stats->fail_host_rx++;
+	}
+
+	dev_put(dev);
+	return;
+}
+
+/*
+ * nss_dtlsmgr_ctx_dev_tx()
+ *	Transmit packet to DTLS node in NSS firmware.
+ */
+static netdev_tx_t nss_dtlsmgr_ctx_dev_tx(struct sk_buff *skb, struct net_device *dev)
+{
+	struct nss_dtlsmgr_ctx *ctx = netdev_priv(dev);
+	struct nss_dtlsmgr_metadata *ndm = NULL;
+	struct nss_dtlsmgr_ctx_data *encap;
+	struct nss_dtlsmgr_stats *stats;
+	struct sk_buff *skb2;
+	bool mdata_init;
+	bool expand_skb;
+	int nhead, ntail;
+
+	NSS_DTLSMGR_VERIFY_MAGIC(ctx);
+	encap = &ctx->encap;
+	stats = &encap->stats;
+
+	nhead = dev->needed_headroom;
+	ntail = dev->needed_tailroom;
+
+	/*
+	 * Check if skb is shared; unshare in case it is shared
+	 */
+	if (skb_shared(skb)) {
+		skb = skb_unshare(skb, in_atomic() ? GFP_ATOMIC : GFP_KERNEL);
+		if (unlikely(!skb))
+			return NETDEV_TX_OK;	/* skb_unshare() freed the original */
+	}
+
+	nss_dtlsmgr_trace("%px: TX packet for DTLS encapsulation, ifnum(%d)", dev, encap->ifnum);
+
+	if (encap->flags & NSS_DTLSMGR_ENCAP_METADATA) {
+		ndm = (struct nss_dtlsmgr_metadata *)skb->data;
+
+		/*
+		 * Check if metadata is initialized
+		 */
+		mdata_init = ndm->flags & NSS_DTLSMGR_METADATA_FLAG_ENC;
+		if (unlikely(!mdata_init))
+			goto free;
+
+	}
+
+	/*
+	 * For all these cases
+	 * - create a writable copy of buffer
+	 * - increase the head room
+	 * - increase the tail room
+	 * - skb->data is not 4-byte aligned
+	 */
+	expand_skb = skb_cloned(skb) || (skb_headroom(skb) < nhead) || (skb_tailroom(skb) < ntail)
+			|| !IS_ALIGNED((unsigned long)skb->data, sizeof(uint32_t));
+
+	if (expand_skb) {
+		skb2 = skb_copy_expand(skb, nhead, ntail, GFP_ATOMIC);
+		if (!skb2) {
+			nss_dtlsmgr_trace("%px: unable to expand buffer for (%s)", ctx, dev->name);
+			/*
+			 * Update stats based on whether headroom or tailroom or both failed
+			 */
+			stats->fail_headroom = stats->fail_headroom + (skb_headroom(skb) < nhead);
+			stats->fail_tailroom = stats->fail_tailroom + (skb_tailroom(skb) < ntail);
+			goto free;
+		}
+
+		dev_kfree_skb_any(skb);
+		skb = skb2;
+	}
+
+	if (nss_dtls_cmn_tx_buf(skb, encap->ifnum, encap->nss_ctx) != NSS_TX_SUCCESS) {
+		nss_dtlsmgr_trace("%px: unable to tx buffer for (%u)", ctx, encap->ifnum);
+		return NETDEV_TX_BUSY;
+	}
+
+	return NETDEV_TX_OK;
+free:
+	dev_kfree_skb_any(skb);
+	stats->fail_host_tx++;
+	return NETDEV_TX_OK;
+}
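+
+/*
+ * Illustrative metadata sketch (assumption: struct nss_dtlsmgr_metadata
+ * layout comes from nss_dtlsmgr.h): when NSS_DTLSMGR_ENCAP_METADATA is
+ * set, the sender prepends an initialized metadata header before
+ * transmitting through this device, e.g.
+ *
+ *	ndm = (struct nss_dtlsmgr_metadata *)skb_push(skb, sizeof(*ndm));
+ *	ndm->flags = NSS_DTLSMGR_METADATA_FLAG_ENC;
+ */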
+
+/*
+ * nss_dtlsmgr_ctx_dev_close()
+ *	Stop packet transmission on the DTLS network device.
+ */
+static int nss_dtlsmgr_ctx_dev_close(struct net_device *dev)
+{
+	netif_stop_queue(dev);
+	return 0;
+}
+
+/*
+ * nss_dtlsmgr_ctx_dev_open()
+ *	Start processing packets on the DTLS network device.
+ */
+static int nss_dtlsmgr_ctx_dev_open(struct net_device *dev)
+{
+	netif_start_queue(dev);
+	return 0;
+}
+
+/*
+ * nss_dtlsmgr_ctx_dev_free()
+ *	Free an existing DTLS context device.
+ */
+static void nss_dtlsmgr_ctx_dev_free(struct net_device *dev)
+{
+	struct nss_dtlsmgr_ctx *ctx = netdev_priv(dev);
+
+	nss_dtlsmgr_trace("%px: free dtls context device(%s)", dev, dev->name);
+
+	if (ctx->dentry)
+		debugfs_remove_recursive(ctx->dentry);
+
+	free_netdev(dev);
+}
+
+/*
+ * nss_dtlsmgr_ctx_get_dev_stats64()
+ *	To get the netdev stats
+ */
+static struct rtnl_link_stats64 *nss_dtlsmgr_ctx_get_dev_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+	struct nss_dtlsmgr_ctx *ctx = netdev_priv(dev);
+	struct nss_dtlsmgr_stats *encap_stats, *decap_stats;
+
+	encap_stats = &ctx->encap.stats;
+	decap_stats = &ctx->decap.stats;
+
+	stats->rx_packets = decap_stats->rx_packets;
+	stats->rx_bytes = decap_stats->rx_bytes;
+	stats->rx_dropped = decap_stats->rx_dropped;
+
+	stats->tx_bytes = encap_stats->tx_bytes;
+	stats->tx_packets = encap_stats->tx_packets;
+	stats->tx_dropped = encap_stats->fail_headroom + encap_stats->fail_tailroom;
+
+	return stats;
+}
+
+/*
+ * nss_dtlsmgr_ctx_dev_stats64()
+ *	Report packet statistics to Linux.
+ */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0))
+static struct rtnl_link_stats64 *nss_dtlsmgr_ctx_dev_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+	return nss_dtlsmgr_ctx_get_dev_stats64(dev, stats);
+}
+#else
+static void nss_dtlsmgr_ctx_dev_stats64(struct net_device *dev, struct rtnl_link_stats64 *stats)
+{
+	nss_dtlsmgr_ctx_get_dev_stats64(dev, stats);
+}
+#endif
+
+/*
+ * nss_dtlsmgr_ctx_dev_change_mtu()
+ *	Change MTU size of DTLS context device.
+ */
+static int32_t nss_dtlsmgr_ctx_dev_change_mtu(struct net_device *dev, int32_t mtu)
+{
+	dev->mtu = mtu;
+	return 0;
+}
+
+/*
+ * DTLS netdev ops
+ */
+static const struct net_device_ops nss_dtlsmgr_ctx_dev_ops = {
+	.ndo_start_xmit = nss_dtlsmgr_ctx_dev_tx,
+	.ndo_open = nss_dtlsmgr_ctx_dev_open,
+	.ndo_stop = nss_dtlsmgr_ctx_dev_close,
+	.ndo_get_stats64 = nss_dtlsmgr_ctx_dev_stats64,
+	.ndo_change_mtu = nss_dtlsmgr_ctx_dev_change_mtu,
+};
+
+/*
+ * nss_dtlsmgr_ctx_dev_setup()
+ *	Setup the DTLS network device.
+ */
+void nss_dtlsmgr_ctx_dev_setup(struct net_device *dev)
+{
+	dev->addr_len = ETH_ALEN;
+	dev->mtu = ETH_DATA_LEN;
+	dev->hard_header_len = 0;
+	dev->needed_headroom = 0;
+	dev->needed_tailroom = 0;
+
+	dev->type = ARPHRD_TUNNEL;
+	dev->ethtool_ops = NULL;
+	dev->header_ops = NULL;
+	dev->netdev_ops = &nss_dtlsmgr_ctx_dev_ops;
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(4, 11, 8))
+	dev->destructor = nss_dtlsmgr_ctx_dev_free;
+#else
+	dev->priv_destructor = nss_dtlsmgr_ctx_dev_free;
+#endif
+	memcpy(dev->dev_addr, "\xaa\xbb\xcc\xdd\xee\xff", dev->addr_len);
+	memset(dev->broadcast, 0xff, dev->addr_len);
+	memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
+}
diff --git a/qca-nss-clients/dtls/v2.0/nss_dtlsmgr_ctx_stats.c b/qca-nss-clients/dtls/v2.0/nss_dtlsmgr_ctx_stats.c
new file mode 100644
index 0000000..5d2b4f3
--- /dev/null
+++ b/qca-nss-clients/dtls/v2.0/nss_dtlsmgr_ctx_stats.c
@@ -0,0 +1,240 @@
+/*
+ **************************************************************************
+ * Copyright (c) 2017, 2020, The Linux Foundation. All rights reserved.
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all copies.
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ **************************************************************************
+ */
+
+/*
+ * nss_dtlsmgr_ctx_stats.c
+ *	NSS DTLS Manager context statistics
+ */
+
+#include <linux/atomic.h>
+#include <linux/crypto.h>
+#include <linux/debugfs.h>
+#include <linux/module.h>
+#include <linux/rtnetlink.h>
+#include <linux/types.h>
+#include <linux/version.h>
+
+#include <nss_api_if.h>
+#include <nss_dtlsmgr.h>
+
+#include "nss_dtlsmgr_private.h"
+
+#define NSS_DTLSMGR_STATS_MAX_STR_LENGTH 64
+#define NSS_DTLSMGR_STATS_EXTRA_LINES 16
+
+extern struct nss_dtlsmgr g_dtls;
+
+/*
+ * nss_dtlsmgr_ctx_fill_hw_error_stats()
+ *	Fill hardware error statistics
+ */
+static ssize_t nss_dtlsmgr_ctx_fill_hw_error_stats(struct nss_dtlsmgr_stats *stats, char *buf,
+						   ssize_t max_buf_len, ssize_t len)
+{
+	int i;
+
+	len += snprintf(buf + len, max_buf_len - len, "\nHardware Errors\n---------------\n");
+	len += snprintf(buf + len, max_buf_len - len, "length_error        = %lld\n", stats->fail_hw.len_error);
+	len += snprintf(buf + len, max_buf_len - len, "token_error         = %lld\n", stats->fail_hw.token_error);
+	len += snprintf(buf + len, max_buf_len - len, "bypass_error        = %lld\n", stats->fail_hw.bypass_error);
+	len += snprintf(buf + len, max_buf_len - len, "config_error        = %lld\n", stats->fail_hw.config_error);
+	len += snprintf(buf + len, max_buf_len - len, "algo_error          = %lld\n", stats->fail_hw.algo_error);
+	len += snprintf(buf + len, max_buf_len - len, "hash_ovf_error      = %lld\n", stats->fail_hw.hash_ovf_error);
+	len += snprintf(buf + len, max_buf_len - len, "ttl_error           = %lld\n", stats->fail_hw.ttl_error);
+	len += snprintf(buf + len, max_buf_len - len, "csum_error          = %lld\n", stats->fail_hw.csum_error);
+	len += snprintf(buf + len, max_buf_len - len, "timeout_error       = %lld\n", stats->fail_hw.timeout_error);
+
+	len += snprintf(buf + len, max_buf_len - len, "\nClassification Errors\n---------------------\n");
+	for (i = 0; i < NSS_DTLS_CMN_CLE_MAX; i++) {
+		/*
+		 * Don't print if there are no errors
+		 */
+		if (!stats->fail_cle[i])
+			continue;
+		len += snprintf(buf + len, max_buf_len - len, "cle_error_%02d        = %lld\n", i, stats->fail_cle[i]);
+	}
+
+	return len;
+}
+
+/*
+ * nss_dtlsmgr_ctx_encap_stats_read()
+ *	Read the DTLS encapsulation statistics.
+ */
+static ssize_t nss_dtlsmgr_ctx_encap_stats_read(struct file *filep, char __user *buffer, size_t count, loff_t *ppos)
+{
+	struct nss_dtlsmgr_ctx *ctx = filep->private_data;
+	struct nss_dtlsmgr_stats *stats;
+	ssize_t max_buf_len, len, ret;
+	char *buf;
+
+	NSS_DTLSMGR_VERIFY_MAGIC(ctx);
+
+	/*
+	 * (Lines of output) * (Max string length)
+	 */
+	max_buf_len = ((sizeof(*stats) / sizeof(uint64_t)) + NSS_DTLSMGR_STATS_EXTRA_LINES) *
+		      NSS_DTLSMGR_STATS_MAX_STR_LENGTH;
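+	/*
+	 * For example (illustrative sizes): ~48 uint64_t counters plus 16
+	 * extra lines at 64 bytes each reserves (48 + 16) * 64 = 4 KiB.
+	 */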
+
+	buf = vzalloc(max_buf_len);
+	if (!buf) {
+		nss_dtlsmgr_warn("Failed to allocate memory for statistics buffer");
+		return 0;
+	}
+
+	/*
+	 * Fill encap statistics
+	 */
+	stats = &ctx->encap.stats;
+	len = snprintf(buf, max_buf_len, "ENCAPSULATION INTERFACE (%d) STATISTICS\n", ctx->encap.ifnum);
+	len += snprintf(buf + len, max_buf_len - len, "rx_packets           = %lld\n", stats->rx_packets);
+	len += snprintf(buf + len, max_buf_len - len, "rx_bytes             = %lld\n", stats->rx_bytes);
+	len += snprintf(buf + len, max_buf_len - len, "rx_dropped           = %lld\n", stats->rx_dropped);
+	len += snprintf(buf + len, max_buf_len - len, "rx_single_rec        = %lld\n", stats->rx_single_rec);
+	len += snprintf(buf + len, max_buf_len - len, "rx_multi_rec         = %lld\n", stats->rx_multi_rec);
+	len += snprintf(buf + len, max_buf_len - len, "tx_packets           = %lld\n", stats->tx_packets);
+	len += snprintf(buf + len, max_buf_len - len, "tx_bytes             = %lld\n", stats->tx_bytes);
+	len += snprintf(buf + len, max_buf_len - len, "fail_crypto_resource = %lld\n", stats->fail_crypto_resource);
+	len += snprintf(buf + len, max_buf_len - len, "fail_crypto_enqueue  = %lld\n", stats->fail_crypto_enqueue);
+	len += snprintf(buf + len, max_buf_len - len, "fail_headroom        = %lld\n", stats->fail_headroom);
+	len += snprintf(buf + len, max_buf_len - len, "fail_tailroom        = %lld\n", stats->fail_tailroom);
+	len += snprintf(buf + len, max_buf_len - len, "fail_queue           = %lld\n", stats->fail_queue);
+	len += snprintf(buf + len, max_buf_len - len, "fail_queue_nexthop   = %lld\n", stats->fail_queue_nexthop);
+	len += snprintf(buf + len, max_buf_len - len, "fail_pbuf_alloc      = %lld\n", stats->fail_pbuf_alloc);
+	len += snprintf(buf + len, max_buf_len - len, "fail_pbuf_linear     = %lld\n", stats->fail_pbuf_linear);
+	len += snprintf(buf + len, max_buf_len - len, "fail_pbuf_stats      = %lld\n", stats->fail_pbuf_stats);
+	len += snprintf(buf + len, max_buf_len - len, "fail_pbuf_align      = %lld\n", stats->fail_pbuf_align);
+	len += snprintf(buf + len, max_buf_len - len, "fail_ctx_active      = %lld\n", stats->fail_ctx_active);
+	len += snprintf(buf + len, max_buf_len - len, "fail_hwctx_active    = %lld\n", stats->fail_hwctx_active);
+	len += snprintf(buf + len, max_buf_len - len, "fail_cipher          = %lld\n", stats->fail_cipher);
+	len += snprintf(buf + len, max_buf_len - len, "fail_auth            = %lld\n", stats->fail_auth);
+	len += snprintf(buf + len, max_buf_len - len, "fail_seq_overflow    = %lld\n", stats->fail_seq_ovf);
+	len += snprintf(buf + len, max_buf_len - len, "fail_host_tx         = %lld\n", stats->fail_host_tx);
+	len += snprintf(buf + len, max_buf_len - len, "fail_host_rx         = %lld\n", stats->fail_host_rx);
+
+	/* Returns total number of bytes written to the buffer */
+	len = nss_dtlsmgr_ctx_fill_hw_error_stats(stats, buf, max_buf_len, len);
+
+	ret = simple_read_from_buffer(buffer, count, ppos, buf, len);
+	vfree(buf);
+
+	return ret;
+}
+
+/*
+ * nss_dtlsmgr_ctx_decap_stats_read()
+ *	Read the DTLS decapsulation statistics.
+ */
+static ssize_t nss_dtlsmgr_ctx_decap_stats_read(struct file *filep, char __user *buffer, size_t count, loff_t *ppos)
+{
+	struct nss_dtlsmgr_ctx *ctx = filep->private_data;
+	struct nss_dtlsmgr_stats *stats;
+	ssize_t max_buf_len, ret;
+	ssize_t len = 0;
+	char *buf;
+
+	NSS_DTLSMGR_VERIFY_MAGIC(ctx);
+
+	/*
+	 * (Lines of output) * (Max string length)
+	 */
+	max_buf_len = ((sizeof(*stats) / sizeof(uint64_t)) + NSS_DTLSMGR_STATS_EXTRA_LINES) *
+		      NSS_DTLSMGR_STATS_MAX_STR_LENGTH;
+
+	buf = vzalloc(max_buf_len);
+	if (!buf) {
+		nss_dtlsmgr_warn("Failed to allocate memory for statistic buffer");
+		return 0;
+	}
+
+	/*
+	 * Fill decap statistics
+	 */
+	stats = &ctx->decap.stats;
+	len += snprintf(buf, max_buf_len - len, "DECAPSULATION INTERFACE (%d) STATISTICS\n", ctx->decap.ifnum);
+	len += snprintf(buf + len, max_buf_len - len, "rx_packets           = %lld\n", stats->rx_packets);
+	len += snprintf(buf + len, max_buf_len - len, "rx_bytes             = %lld\n", stats->rx_bytes);
+	len += snprintf(buf + len, max_buf_len - len, "rx_dropped           = %lld\n", stats->rx_dropped);
+	len += snprintf(buf + len, max_buf_len - len, "rx_single_rec        = %lld\n", stats->rx_single_rec);
+	len += snprintf(buf + len, max_buf_len - len, "rx_multi_rec         = %lld\n", stats->rx_multi_rec);
+	len += snprintf(buf + len, max_buf_len - len, "tx_packets           = %lld\n", stats->tx_packets);
+	len += snprintf(buf + len, max_buf_len - len, "tx_bytes             = %lld\n", stats->tx_bytes);
+	len += snprintf(buf + len, max_buf_len - len, "fail_crypto_resource = %lld\n", stats->fail_crypto_resource);
+	len += snprintf(buf + len, max_buf_len - len, "fail_crypto_enqueue  = %lld\n", stats->fail_crypto_enqueue);
+	len += snprintf(buf + len, max_buf_len - len, "fail_version         = %lld\n", stats->fail_ver);
+	len += snprintf(buf + len, max_buf_len - len, "fail_epoch           = %lld\n", stats->fail_epoch);
+	len += snprintf(buf + len, max_buf_len - len, "fail_dtls_record     = %lld\n", stats->fail_dtls_record);
+	len += snprintf(buf + len, max_buf_len - len, "fail_capwap          = %lld\n", stats->fail_capwap);
+	len += snprintf(buf + len, max_buf_len - len, "fail_replay          = %lld\n", stats->fail_replay);
+	len += snprintf(buf + len, max_buf_len - len, "fail_replay_dup      = %lld\n", stats->fail_replay_dup);
+	len += snprintf(buf + len, max_buf_len - len, "fail_replay_window   = %lld\n", stats->fail_replay_win);
+	len += snprintf(buf + len, max_buf_len - len, "fail_queue           = %lld\n", stats->fail_queue);
+	len += snprintf(buf + len, max_buf_len - len, "fail_queue_nexthop   = %lld\n", stats->fail_queue_nexthop);
+	len += snprintf(buf + len, max_buf_len - len, "fail_pbuf_alloc      = %lld\n", stats->fail_pbuf_alloc);
+	len += snprintf(buf + len, max_buf_len - len, "fail_pbuf_linear     = %lld\n", stats->fail_pbuf_linear);
+	len += snprintf(buf + len, max_buf_len - len, "fail_pbuf_stats      = %lld\n", stats->fail_pbuf_stats);
+	len += snprintf(buf + len, max_buf_len - len, "fail_pbuf_align      = %lld\n", stats->fail_pbuf_align);
+	len += snprintf(buf + len, max_buf_len - len, "fail_ctx_active      = %lld\n", stats->fail_ctx_active);
+	len += snprintf(buf + len, max_buf_len - len, "fail_hwctx_active    = %lld\n", stats->fail_hwctx_active);
+	len += snprintf(buf + len, max_buf_len - len, "fail_cipher          = %lld\n", stats->fail_cipher);
+	len += snprintf(buf + len, max_buf_len - len, "fail_auth            = %lld\n", stats->fail_auth);
+	len += snprintf(buf + len, max_buf_len - len, "fail_seq_overflow    = %lld\n", stats->fail_seq_ovf);
+	len += snprintf(buf + len, max_buf_len - len, "fail_block_length    = %lld\n", stats->fail_blk_len);
+	len += snprintf(buf + len, max_buf_len - len, "fail_hash_length     = %lld\n", stats->fail_hash_len);
+	len += snprintf(buf + len, max_buf_len - len, "fail_host_tx         = %lld\n", stats->fail_host_tx);
+	len += snprintf(buf + len, max_buf_len - len, "fail_host_rx         = %lld\n", stats->fail_host_rx);
+
+	/* Returns total number of bytes written to the buffer */
+	len = nss_dtlsmgr_ctx_fill_hw_error_stats(stats, buf, max_buf_len, len);
+
+	ret = simple_read_from_buffer(buffer, count, ppos, buf, len);
+	vfree(buf);
+
+	return ret;
+}
+
+/*
+ * Context file operation structure instance
+ */
+static const struct file_operations nss_dtlsmgr_encap_stats_op = {
+	.owner = THIS_MODULE,
+	.open = simple_open,
+	.read = nss_dtlsmgr_ctx_encap_stats_read,
+};
+
+static const struct file_operations nss_dtlsmgr_decap_stats_op = {
+	.owner = THIS_MODULE,
+	.open = simple_open,
+	.read = nss_dtlsmgr_ctx_decap_stats_read,
+};
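+
+/*
+ * The two handlers above are exposed through per-session debugfs files.
+ * As a sketch (the root directory name depends on how drv->root_dir is
+ * created elsewhere in this driver), the resulting layout is:
+ *
+ *	/sys/kernel/debug/<root>/<netdev-name>/encap_stats
+ *	/sys/kernel/debug/<root>/<netdev-name>/decap_stats
+ *
+ * so the statistics can be read with e.g. "cat .../encap_stats".
+ */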
+
+/*
+ * nss_dtlsmgr_create_debugfs()
+ *	Create debugfs files to display session statistics.
+ */
+int nss_dtlsmgr_create_debugfs(struct nss_dtlsmgr_ctx *ctx)
+{
+	struct nss_dtlsmgr *drv = &g_dtls;
+
+	ctx->dentry = debugfs_create_dir(ctx->dev->name, drv->root_dir);
+	if (!ctx->dentry) {
+		nss_dtlsmgr_warn("failed to create debugfs directory");
+		return -1;
+	}
+
+	debugfs_create_file("encap_stats", S_IRUGO, ctx->dentry, ctx, &nss_dtlsmgr_encap_stats_op);
+	debugfs_create_file("decap_stats", S_IRUGO, ctx->dentry, ctx, &nss_dtlsmgr_decap_stats_op);
+	return 0;
+}
diff --git a/qca-nss-clients/dtls/v2.0/nss_dtlsmgr_private.h b/qca-nss-clients/dtls/v2.0/nss_dtlsmgr_private.h
new file mode 100644
index 0000000..131192c
--- /dev/null
+++ b/qca-nss-clients/dtls/v2.0/nss_dtlsmgr_private.h
@@ -0,0 +1,185 @@
+/*
+ **************************************************************************
+ * Copyright (c) 2017-2018, 2020, The Linux Foundation. All rights reserved.
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all copies.
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ **************************************************************************
+ */
+
+/*
+ * nss_dtlsmgr_private.h
+ */
+
+#ifndef __NSS_DTLSMGR_PRIVATE_H_
+#define __NSS_DTLSMGR_PRIVATE_H_
+
+#define NSS_DTLSMGR_DEBUG_LEVEL_ERROR 1
+#define NSS_DTLSMGR_DEBUG_LEVEL_WARN 2
+#define NSS_DTLSMGR_DEBUG_LEVEL_INFO 3
+#define NSS_DTLSMGR_DEBUG_LEVEL_TRACE 4
+
+#define nss_dtlsmgr_error(s, ...) do {	\
+	if (net_ratelimit()) {	\
+		pr_alert("%s[%d]:" s "\n", __func__, __LINE__, ##__VA_ARGS__);	\
+	}	\
+} while (0)
+
+#if defined(CONFIG_DYNAMIC_DEBUG)
+/*
+ * Compile messages for dynamic enable/disable
+ */
+#define nss_dtlsmgr_warn(s, ...) pr_debug("%s[%d]:" s "\n", __func__, __LINE__, ##__VA_ARGS__)
+#define nss_dtlsmgr_info(s, ...) pr_debug("%s[%d]:" s "\n", __func__, __LINE__, ##__VA_ARGS__)
+#define nss_dtlsmgr_trace(s, ...) pr_debug("%s[%d]:" s "\n", __func__, __LINE__, ##__VA_ARGS__)
+#else
+
+/*
+ * Statically compile messages at different levels
+ */
+#define nss_dtlsmgr_warn(s, ...) do {	\
+	if (NSS_DTLSMGR_DEBUG_LEVEL > NSS_DTLSMGR_DEBUG_LEVEL_ERROR)	\
+		pr_warn("%s[%d]:" s "\n", __func__, __LINE__, ##__VA_ARGS__);	\
+} while (0)
+
+#define nss_dtlsmgr_info(s, ...) do {	\
+	if (NSS_DTLSMGR_DEBUG_LEVEL > NSS_DTLSMGR_DEBUG_LEVEL_WARN)	\
+		pr_notice("%s[%d]:" s "\n", __func__, __LINE__, ##__VA_ARGS__);	\
+} while (0)
+
+#define nss_dtlsmgr_trace(s, ...) do {	\
+	if (NSS_DTLSMGR_DEBUG_LEVEL > NSS_DTLSMGR_DEBUG_LEVEL_INFO)	\
+		pr_info("%s[%d]:" s "\n", __func__, __LINE__, ##__VA_ARGS__);	\
+} while (0)
+
+#endif /* CONFIG_DYNAMIC_DEBUG */
+
+#define NSS_DTLSMGR_DTLS_HDR_SZ 13			/* DTLS header length */
+#define NSS_DTLSMGR_CAPWAP_DTLS_HDR_SZ 4		/* CAPWAP-DTLS header length */
+#define NSS_DTLSMGR_CTX_MAGIC 0x5d7eb219		/* DTLS context magic value */
+#define NSS_DTLSMGR_EDMA_PRE_HDR_SZ 32			/* EDMA preheader size */
+#define NSS_DTLSMGR_SGT_HDR_SZ 8			/* Security group header size */
+
+#if defined(NSS_DTLSMGR_DEBUG)
+#define NSS_DTLSMGR_VERIFY_MAGIC(ctx) do {	\
+	struct nss_dtlsmgr_ctx *__ctx = (ctx);	\
+	BUG_ON(__ctx->magic != NSS_DTLSMGR_CTX_MAGIC);	\
+} while (0)
+
+#define NSS_DTLSMGR_SET_MAGIC(ctx, magic) do {	\
+	struct nss_dtlsmgr_ctx *__ctx = (ctx);	\
+	__ctx->magic = (magic);	\
+} while (0)
+#else
+#define NSS_DTLSMGR_VERIFY_MAGIC(ctx)
+#define NSS_DTLSMGR_SET_MAGIC(ctx, magic)
+#endif
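+
+/*
+ * Illustrative use of the magic helpers on NSS_DTLSMGR_DEBUG builds (the
+ * setter is presumably invoked when a context is allocated):
+ *
+ *	NSS_DTLSMGR_SET_MAGIC(ctx, NSS_DTLSMGR_CTX_MAGIC);
+ *	...
+ *	NSS_DTLSMGR_VERIFY_MAGIC(ctx);	(BUG() fires if the context is corrupted)
+ */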
+
+/*
+ * DTLS algorithm information
+ */
+struct nss_dtlsmgr_algo_info {
+	char *name;				/* Linux crypto algorithm string. */
+	uint32_t rta_key_size;			/* RTA key attribute size. */
+};
+
+/*
+ * DTLS flow information
+ */
+struct nss_dtlsmgr_flow_data {
+	uint32_t sip[4];			/* Source IPv4/v6 address. */
+	uint32_t dip[4];			/* Destination IPv4/v6 address. */
+	uint32_t flags;				/* Transformation flags. */
+
+	uint16_t sport;				/* Source UDP/UDPLite port. */
+	uint16_t dport;				/* Destination UDP/UDPLite port. */
+
+	uint8_t dscp;				/* DSCP value when statically configured. */
+	uint8_t hop_limit_ttl;			/* Hop limit or time to live. */
+	bool dscp_copy;				/* Copy dscp. */
+	bool df;				/* Do not fragment settings. */
+};
+
+/*
+ * DTLS configuration data
+ */
+struct nss_dtlsmgr_dtls_data {
+	struct list_head list;			/* List of crypto data. */
+	struct crypto_aead *aead;		/* Linux AEAD context. */
+	uint32_t crypto_idx;			/* Crypto index. */
+	uint32_t ver;				/* DTLS version. */
+
+	uint16_t window_size;			/* DTLS anti-replay window. */
+	uint16_t epoch;				/* Current epoch. */
+
+	uint8_t blk_len;			/* Cipher block length. */
+	uint8_t hash_len;			/* Hash length. */
+	uint8_t iv_len;				/* IV length. */
+	uint8_t res1;				/* Reserved. */
+};
+
+/*
+ * DTLS context data
+ */
+struct nss_dtlsmgr_ctx_data {
+	struct nss_dtlsmgr_stats stats;		/* Statistics. */
+	struct nss_dtlsmgr_flow_data flow;	/* Flow data information. */
+	struct nss_ctx_instance *nss_ctx;	/* NSS context handle. */
+	struct list_head dtls_active;		/* List of active DTLS record(s). */
+
+	uint32_t headroom;			/* Headroom needed. */
+	uint32_t tailroom;			/* Tailroom needed. */
+	uint32_t ifnum;				/* NSS interface number. */
+	uint32_t src_ifnum;			/* Source interface number for NSS. */
+	uint32_t dest_ifnum;			/* Destination interface number for NSS. */
+	uint32_t flags;				/* DTLS flags. */
+	uint32_t di_type;			/* Dynamic interface type. */
+};
+
+/*
+ * DTLS manager context
+ */
+struct nss_dtlsmgr_ctx {
+	rwlock_t lock;				/* Context lock. */
+	struct net_device *dev;			/* Session netdevice. */
+	struct dentry *dentry;			/* Debugfs directory for ctx statistics. */
+
+	struct nss_dtlsmgr_ctx_data encap;	/* Encapsulation data. */
+	struct nss_dtlsmgr_ctx_data decap;	/* Decapsulation data. */
+
+	void *app_data;				/* Opaque data for callbacks. */
+	nss_dtlsmgr_notify_callback_t notify_cb;/* Statistics notification callback. */
+	nss_dtlsmgr_data_callback_t data_cb;	/* Data callback. */
+
+#if defined(NSS_DTLSMGR_DEBUG)
+	uint32_t magic;				/* Magic check. */
+#endif
+};
+
+/*
+ * DTLS manager data
+ */
+struct nss_dtlsmgr {
+	atomic_t is_configured;			/* Firmware is configured. */
+	struct dentry *root_dir;		/* Debugfs root directory. */
+	struct nss_ctx_instance *nss_ctx;	/* NSS data/message handle. */
+};
+
+extern struct nss_dtlsmgr g_dtls;
+
+extern void nss_dtlsmgr_ctx_dev_event_inner(void *if_ctx, struct nss_cmn_msg *ndcm);
+extern void nss_dtlsmgr_ctx_dev_event_outer(void *if_ctx, struct nss_cmn_msg *ndcm);
+extern void nss_dtlsmgr_ctx_dev_data_callback(void *app_data, struct sk_buff *skb);
+extern void nss_dtlsmgr_ctx_dev_rx_inner(struct net_device *netdev, struct sk_buff *skb, struct napi_struct *napi);
+extern void nss_dtlsmgr_ctx_dev_rx_outer(struct net_device *netdev, struct sk_buff *skb, struct napi_struct *napi);
+extern void nss_dtlsmgr_ctx_dev_setup(struct net_device *dev);
+extern int nss_dtlsmgr_create_debugfs(struct nss_dtlsmgr_ctx *ctx);
+#endif /* !__NSS_DTLSMGR_PRIVATE_H_ */
diff --git a/qca-nss-clients/eogremgr/Makefile b/qca-nss-clients/eogremgr/Makefile
new file mode 100644
index 0000000..cea37c9
--- /dev/null
+++ b/qca-nss-clients/eogremgr/Makefile
@@ -0,0 +1,6 @@
+# Makefile for eogre client
+ccflags-y := -I$(obj)/../exports -I$(obj)/.. \
+	-DNSS_CLIENT_BUILD_ID="$(BUILD_ID)" -DNSS_EOGREMGR_DEBUG_LEVEL=2 -Wall -Werror
+
+obj-m += qca-nss-eogremgr.o
+qca-nss-eogremgr-objs := nss_eogremgr.o
diff --git a/qca-nss-clients/eogremgr/nss_eogremgr.c b/qca-nss-clients/eogremgr/nss_eogremgr.c
new file mode 100644
index 0000000..4861f35
--- /dev/null
+++ b/qca-nss-clients/eogremgr/nss_eogremgr.c
@@ -0,0 +1,587 @@
+/*
+ **************************************************************************
+ * Copyright (c) 2019-2020, The Linux Foundation. All rights reserved.
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all copies.
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ **************************************************************************
+ */
+
+/*
+ * nss_eogremgr.c
+ *	NSS EOGRE manager
+ */
+
+#include <nss_api_if.h>
+#include <nss_cmn.h>
+#include "nss_connmgr_gre_public.h"
+#include <linux/of.h>
+#include "nss_eogremgr.h"
+#include "nss_eogremgr_priv.h"
+
+/*
+ * Mapping table from tunnel-id to rule/GRE netdev
+ */
+struct nss_eogremgr_tunnel {
+	union {
+		struct nss_ipv4_rule_create_msg v4;	/* IPv4 rule structure. */
+		struct nss_ipv6_rule_create_msg v6;	/* IPv6 rule structure. */
+	} ip_rule;
+
+	bool is_ipv4;			/* Set when tunnel is ipv4. */
+	struct net_device *gre_dev;	/* Store the gre netdev associated with the tunnel. */
+};
+
+static struct nss_eogremgr_tunnel *tunnels[NSS_EOGREMGR_MAX_TUNNELS] = {NULL};
+
+/*
+ * nss_eogremgr_get_if_num_inner()
+ * 	Get the GRE inner interface number associated with the tunnel.
+ */
+int nss_eogremgr_get_if_num_inner(uint32_t tunnel_id)
+{
+	int inner_ifnum;
+
+	/*
+	 * Check if tunnel_id is in range and the tunnel exists.
+	 */
+	if (tunnel_id >= NSS_EOGREMGR_MAX_TUNNELS || !tunnels[tunnel_id]) {
+		nss_eogremgr_warn("Could not find tunnel data for tunnel_id: %d\n", tunnel_id);
+		return -1;
+	}
+
+	/*
+	 * Find inner ifnum for the GRE netdev.
+	 */
+	inner_ifnum = nss_cmn_get_interface_number_by_dev_and_type(
+			tunnels[tunnel_id]->gre_dev, NSS_DYNAMIC_INTERFACE_TYPE_GRE_INNER);
+	if (inner_ifnum < 0) {
+		nss_eogremgr_warn("No GRE inner interface registered for tunnel id : %d\n", tunnel_id);
+		return -1;
+	}
+
+	/*
+	 * Append the core-id to the ifnum.
+	 */
+	return nss_gre_ifnum_with_core_id(inner_ifnum);
+}
+EXPORT_SYMBOL(nss_eogremgr_get_if_num_inner);
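+
+/*
+ * Note: the value returned above already has the NSS core-id encoded by
+ * nss_gre_ifnum_with_core_id(), so callers can pass it directly to NSS
+ * APIs that expect a core-qualified interface number. Illustrative check
+ * (the error handling is hypothetical):
+ *
+ *	int ifnum = nss_eogremgr_get_if_num_inner(tunnel_id);
+ *	if (ifnum < 0)
+ *		return -ENOENT;
+ */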
+
+/*
+ * nss_eogremgr_tunnel_disable()
+ * 	Function to disable eogre tunnel.
+ */
+nss_eogremgr_status_t nss_eogremgr_tunnel_disable(uint32_t tunnel_id)
+{
+	int ret;
+
+	/*
+	 * Check if tunnel_id is in range and the tunnel exists.
+	 */
+	if (tunnel_id >= NSS_EOGREMGR_MAX_TUNNELS || !tunnels[tunnel_id]) {
+		nss_eogremgr_warn("Could not find tunnel data for tunnel_id: %d\n", tunnel_id);
+		return NSS_EOGRE_ERR_NETDEV_DISABLE_FAILED;
+	}
+
+	/*
+	 * Notify GRE client to disable GRE tunnel.
+	 */
+	ret = nss_connmgr_gre_dev_close(tunnels[tunnel_id]->gre_dev);
+	if (ret != 0) {
+		nss_eogremgr_warn("%s: Unable to disable tunnel:%d\n\n",
+				tunnels[tunnel_id]->gre_dev->name, tunnel_id);
+		return NSS_EOGRE_ERR_NETDEV_DISABLE_FAILED;
+	}
+
+	return NSS_EOGRE_SUCCESS;
+}
+EXPORT_SYMBOL(nss_eogremgr_tunnel_disable);
+
+/*
+ * nss_eogremgr_tunnel_enable()
+ * 	Function to enable eogre tunnel.
+ */
+nss_eogremgr_status_t nss_eogremgr_tunnel_enable(uint32_t tunnel_id)
+{
+	int ret;
+
+	/*
+	 * Check if tunnel_id is in range and the tunnel exists.
+	 */
+	if (tunnel_id >= NSS_EOGREMGR_MAX_TUNNELS || !tunnels[tunnel_id]) {
+		nss_eogremgr_warn("Could not find tunnel data for tunnel_id: %d\n", tunnel_id);
+		return NSS_EOGRE_ERR_NETDEV_ENABLE_FAILED;
+	}
+
+	/*
+	 * Notify GRE client to enable GRE tunnel.
+	 */
+	ret = nss_connmgr_gre_dev_open(tunnels[tunnel_id]->gre_dev);
+	if (ret != 0) {
+		nss_eogremgr_warn("%s: Unable to enable tunnel:%d\n",
+				tunnels[tunnel_id]->gre_dev->name, tunnel_id);
+		return NSS_EOGRE_ERR_NETDEV_ENABLE_FAILED;
+	}
+
+	return NSS_EOGRE_SUCCESS;
+}
+EXPORT_SYMBOL(nss_eogremgr_tunnel_enable);
+
+/*
+ * nss_eogremgr_tunnel_destroy_ipv6_rule()
+ * 	Destroy a given IPv6 connection in the NSS
+ */
+static nss_tx_status_t nss_eogremgr_tunnel_destroy_ipv6_rule(struct nss_ctx_instance *nss_ctx, struct nss_ipv6_5tuple *ni5t)
+{
+	struct nss_ipv6_msg nim;
+	nss_tx_status_t status;
+
+	nss_eogremgr_info("%px: ctx: Destroy IPv6: %pI6:%u, %pI6:%u, p: %d\n", nss_ctx,
+			&ni5t->flow_ip, ni5t->flow_ident, &ni5t->return_ip, ni5t->return_ident, ni5t->protocol);
+
+	nss_ipv6_msg_init(&nim, NSS_IPV6_RX_INTERFACE, NSS_IPV6_TX_DESTROY_RULE_MSG,
+			sizeof(struct nss_ipv6_rule_destroy_msg), NULL, NULL);
+
+	nim.msg.rule_destroy.tuple = *ni5t;
+	status = nss_ipv6_tx_sync(nss_ctx, &nim);
+	if (status != NSS_TX_SUCCESS) {
+		nss_eogremgr_warn("%px: Destroy IPv6 message failed %d\n", nss_ctx, status);
+	}
+
+	return status;
+}
+
+/*
+ * nss_eogremgr_tunnel_destroy_ipv4_rule()
+ * 	Destroy a given IPv4 connection in the NSS
+ */
+static nss_tx_status_t nss_eogremgr_tunnel_destroy_ipv4_rule(struct nss_ctx_instance *nss_ctx, struct nss_ipv4_5tuple *ni5t)
+{
+	struct nss_ipv4_msg nim;
+	nss_tx_status_t status;
+
+	nss_eogremgr_info("%px: ctx: Destroy IPv4: %pI4h :%u, %pI4h :%u, p: %d\n", nss_ctx,
+			&ni5t->flow_ip, ni5t->flow_ident, &ni5t->return_ip, ni5t->return_ident, ni5t->protocol);
+
+	nss_ipv4_msg_init(&nim, NSS_IPV4_RX_INTERFACE, NSS_IPV4_TX_DESTROY_RULE_MSG,
+			sizeof(struct nss_ipv4_rule_destroy_msg), NULL, NULL);
+
+	nim.msg.rule_destroy.tuple = *ni5t;
+	status = nss_ipv4_tx_sync(nss_ctx, &nim);
+	if (status != NSS_TX_SUCCESS) {
+		nss_eogremgr_warn("%px: Destroy IPv4 message failed %d\n", nss_ctx, status);
+	}
+
+	return status;
+}
+
+/*
+ * nss_eogremgr_tunnel_create_ipv6_rule()
+ * 	Create a given IPv6 connection in the NSS
+ */
+static nss_tx_status_t nss_eogremgr_tunnel_create_ipv6_rule(struct nss_ctx_instance *nss_ctx, struct nss_ipv6_rule_create_msg *nircm)
+{
+	struct nss_ipv6_msg nim;
+	nss_tx_status_t status;
+
+	nss_ipv6_msg_init(&nim, NSS_IPV6_RX_INTERFACE, NSS_IPV6_TX_CREATE_RULE_MSG,
+			sizeof(struct nss_ipv6_rule_create_msg), NULL, NULL);
+
+	nim.msg.rule_create = *nircm;
+	status = nss_ipv6_tx_sync(nss_ctx, &nim);
+	if (status != NSS_TX_SUCCESS) {
+		nss_eogremgr_warn("%px: Create IPv6 message failed %d\n", nss_ctx, status);
+	}
+
+	return status;
+}
+
+/*
+ * nss_eogremgr_tunnel_create_ipv4_rule()
+ * 	Create a given IPv4 connection in the NSS
+ */
+static nss_tx_status_t nss_eogremgr_tunnel_create_ipv4_rule(struct nss_ctx_instance *nss_ctx, struct nss_ipv4_rule_create_msg *nircm)
+{
+	struct nss_ipv4_msg nim;
+	nss_tx_status_t status;
+
+	nss_ipv4_msg_init(&nim, NSS_IPV4_RX_INTERFACE, NSS_IPV4_TX_CREATE_RULE_MSG,
+			sizeof(struct nss_ipv4_rule_create_msg), NULL, NULL);
+
+	nim.msg.rule_create = *nircm;
+	status = nss_ipv4_tx_sync(nss_ctx, &nim);
+	if (status != NSS_TX_SUCCESS) {
+		nss_eogremgr_warn("%px: Create IPv4 message failed %d\n", nss_ctx, status);
+	}
+
+	return status;
+}
+
+/*
+ * nss_eogremgr_tunnel_destroy()
+ * 	Destroy the EoGRE tunnel.
+ */
+nss_eogremgr_status_t nss_eogremgr_tunnel_destroy(uint32_t tunnel_id)
+{
+	struct nss_ctx_instance *nss_ctx_v4, *nss_ctx_v6;
+	nss_tx_status_t status;
+	enum nss_connmgr_gre_err_codes err_code_gre;
+
+	/*
+	 * Check if tunnel_id is in range.
+	 */
+	if (tunnel_id >= NSS_EOGREMGR_MAX_TUNNELS) {
+		nss_eogremgr_warn("Invalid param tunnel_id: %d max: %d\n",
+				tunnel_id, NSS_EOGREMGR_MAX_TUNNELS);
+		return NSS_EOGRE_ERR_INVALID_TUNNEL_ID;
+	}
+
+	/*
+	 * Check if the tunnel_id exists.
+	 */
+	if (!tunnels[tunnel_id]) {
+		nss_eogremgr_warn("Could not find tunnel data for tunnel id : %d\n", tunnel_id);
+		return NSS_EOGRE_ERR_INVALID_TUNNEL_ID;
+	}
+
+	/*
+	 * Destroy IPv4/IPv6 rule
+	 */
+	if (tunnels[tunnel_id]->is_ipv4) {
+		nss_ctx_v4 = nss_ipv4_get_mgr();
+		if (!nss_ctx_v4) {
+			nss_eogremgr_warn("Tunnel: %d: could not get IPv4 ctx\n", tunnel_id);
+			return NSS_EOGRE_ERR_BAD_NSS_CTX;
+		}
+		status = nss_eogremgr_tunnel_destroy_ipv4_rule(nss_ctx_v4, &tunnels[tunnel_id]->ip_rule.v4.tuple);
+	} else {
+		nss_ctx_v6 = nss_ipv6_get_mgr();
+		if (!nss_ctx_v6) {
+			nss_eogremgr_warn("Tunnel: %d: could not get IPv6 ctx\n", tunnel_id);
+			return NSS_EOGRE_ERR_BAD_NSS_CTX;
+		}
+		status = nss_eogremgr_tunnel_destroy_ipv6_rule(nss_ctx_v6, &tunnels[tunnel_id]->ip_rule.v6.tuple);
+	}
+
+	if (status != NSS_TX_SUCCESS) {
+		nss_eogremgr_warn("Destroy IP rule failed for tunnel: %d\n", tunnel_id);
+		return NSS_EOGRE_ERR_DESTROY_IP_RULE_FAILED;
+	}
+
+	/*
+	 * Destroy GRE tunnel
+	 */
+	err_code_gre = nss_connmgr_gre_destroy_interface(tunnels[tunnel_id]->gre_dev);
+	if (err_code_gre != GRE_SUCCESS) {
+		nss_eogremgr_warn("Could not destroy GRE interface: %d, err=%d\n", tunnel_id, err_code_gre);
+
+		/*
+		 * Add the IPv4/IPv6 rule back, since the tunnel was not destroyed.
+		 */
+		if (tunnels[tunnel_id]->is_ipv4) {
+			status = nss_eogremgr_tunnel_create_ipv4_rule(
+					nss_ctx_v4, &tunnels[tunnel_id]->ip_rule.v4);
+		} else {
+			status = nss_eogremgr_tunnel_create_ipv6_rule(
+					nss_ctx_v6, &tunnels[tunnel_id]->ip_rule.v6);
+		}
+
+		if (status != NSS_TX_SUCCESS) {
+			nss_eogremgr_warn("IPv4/IPv6 rule create from nss_eogremgr_tunnel_destroy() failed with status: %d\n",
+					status);
+			return NSS_EOGRE_ERR_CREATE_IP_RULE_FAILED;
+		}
+
+		return NSS_EOGRE_TUNNEL_DESTROY_FAILED;
+	}
+
+	nss_eogremgr_info("Tunnel: %d is destroyed\n", tunnel_id);
+	kfree(tunnels[tunnel_id]);
+	tunnels[tunnel_id] = NULL;
+	return NSS_EOGRE_SUCCESS;
+}
+EXPORT_SYMBOL(nss_eogremgr_tunnel_destroy);
+
+/*
+ * nss_eogremgr_tunnel_ipv6_create()
+ * 	API to create EoGRE IPv6 tunnel and Push IPv6 rule to NSS.
+ */
+nss_eogremgr_status_t nss_eogremgr_tunnel_ipv6_create(struct nss_connmgr_gre_cfg *cfg,
+		struct nss_ipv6_rule_create_msg *nircm, uint32_t tunnel_id)
+{
+	struct net_device *gre_dev;
+	struct nss_ctx_instance *nss_ctx;
+	enum nss_connmgr_gre_err_codes err_code_gre;
+	nss_tx_status_t status;
+	int gre_outer_ifnum;
+
+	/*
+	 * Check if tunnel_id is in range.
+	 */
+	if (tunnel_id >= NSS_EOGREMGR_MAX_TUNNELS) {
+		nss_eogremgr_warn("Invalid tunnel_id: %d max: %d\n",
+				tunnel_id, NSS_EOGREMGR_MAX_TUNNELS);
+		return NSS_EOGRE_ERR_INVALID_TUNNEL_ID;
+	}
+
+	/*
+	 * Check if tunnel_id is already in use.
+	 */
+	if (tunnels[tunnel_id]) {
+		nss_eogremgr_warn("Tunnel already exist for this tunnel_id = %d, netdev = %s\n",
+				tunnel_id, tunnels[tunnel_id]->gre_dev->name);
+		return NSS_EOGRE_ERR_TUNNEL_ID_EXIST;
+	}
+
+	/*
+	 * GRE API to create GRE standard interface
+	 */
+	gre_dev = nss_connmgr_gre_create_interface(cfg, &err_code_gre);
+	if (!gre_dev) {
+		nss_eogremgr_warn("Could not create GRE interface, err=%d\n", err_code_gre);
+		return NSS_EOGRE_ERR_GRE_CREATE_FAILED;
+	}
+
+	nss_eogremgr_info("GRE interface %s created with tunnel_id: %u\n", gre_dev->name, tunnel_id);
+
+	gre_outer_ifnum = nss_cmn_get_interface_number_by_dev_and_type(
+			gre_dev, NSS_DYNAMIC_INTERFACE_TYPE_GRE_OUTER);
+	if (gre_outer_ifnum < 0) {
+		nss_eogremgr_warn("No GRE outer interface registered for gre netdev: %s\n", gre_dev->name);
+		nss_connmgr_gre_destroy_interface(gre_dev);
+		return NSS_EOGRE_ERR_INVALID_GRE_IFNUM;
+	}
+
+	nss_ctx = nss_ipv6_get_mgr();
+	if (!nss_ctx) {
+		nss_eogremgr_warn("Tunnel: %d could not get IPv6 ctx\n", tunnel_id);
+		nss_connmgr_gre_destroy_interface(gre_dev);
+		return NSS_EOGRE_ERR_BAD_NSS_CTX;
+	}
+
+	/*
+	 * Configure ifnum for GRE decap node before adding IPv6 rule.
+	 */
+	nircm->conn_rule.flow_interface_num = gre_outer_ifnum;
+
+	/*
+	 * Fill the flow/return ident with 32 bit key, to create a 5-tuple match for GRE with key
+	 */
+	if (cfg->okey_valid) {
+		nircm->tuple.flow_ident = cfg->okey;
+		nircm->tuple.return_ident = cfg->okey;
+	}
+
+	/*
+	 * Push IPv6 rule to NSS
+	 */
+	status = nss_eogremgr_tunnel_create_ipv6_rule(nss_ctx, nircm);
+	if (status != NSS_TX_SUCCESS) {
+		nss_eogremgr_warn("IPv6 rule create failed with status: %d\n", status);
+		nss_connmgr_gre_destroy_interface(gre_dev);
+		return NSS_EOGRE_ERR_CREATE_IP_RULE_FAILED;
+	}
+
+	nss_eogremgr_info("EoGRE Tunnel create done tunnel_id:%d IPv6: %pI6h :%u, %pI6h :%u, p: %d\n",
+			tunnel_id, &nircm->tuple.flow_ip, nircm->tuple.flow_ident, &nircm->tuple.return_ip,
+			nircm->tuple.return_ident, nircm->tuple.protocol);
+
+	/*
+	 * Add EoGRE tunnel entry
+	 */
+	tunnels[tunnel_id] = kzalloc(sizeof(struct nss_eogremgr_tunnel), GFP_ATOMIC);
+	if (!tunnels[tunnel_id]) {
+		nss_eogremgr_warn("Failed to allocate tunnel memory for tunnel_id : %d\n", tunnel_id);
+		nss_eogremgr_tunnel_destroy_ipv6_rule(nss_ctx, &nircm->tuple);
+		nss_connmgr_gre_destroy_interface(gre_dev);
+		return NSS_EOGRE_ERR_ALLOC_TUNNEL_FAILED;
+	}
+
+	memcpy(&tunnels[tunnel_id]->ip_rule.v6, nircm, sizeof(struct nss_ipv6_rule_create_msg));
+	tunnels[tunnel_id]->is_ipv4 = false;
+	tunnels[tunnel_id]->gre_dev = gre_dev;
+
+	return NSS_EOGRE_SUCCESS;
+}
+EXPORT_SYMBOL(nss_eogremgr_tunnel_ipv6_create);
+
+/*
+ * nss_eogremgr_tunnel_ipv4_create()
+ * 	API to create EoGRE IPv4 tunnel and push IPv4 rule to NSS.
+ */
+nss_eogremgr_status_t nss_eogremgr_tunnel_ipv4_create(struct nss_connmgr_gre_cfg *cfg,
+		struct nss_ipv4_rule_create_msg *nircm, uint32_t tunnel_id)
+{
+	struct net_device *gre_dev;
+	struct nss_ctx_instance *nss_ctx;
+	int gre_outer_ifnum;
+	nss_tx_status_t status;
+	enum nss_connmgr_gre_err_codes err_code_gre;
+
+	/*
+	 * Check if tunnel_id is in range.
+	 */
+	if (tunnel_id >= NSS_EOGREMGR_MAX_TUNNELS) {
+		nss_eogremgr_warn("Invalid param tunnel_id: %d max: %d\n", tunnel_id, NSS_EOGREMGR_MAX_TUNNELS);
+		return NSS_EOGRE_ERR_INVALID_TUNNEL_ID;
+	}
+
+	/*
+	 * Check if tunnel_id is already in use.
+	 */
+	if (tunnels[tunnel_id]) {
+		nss_eogremgr_warn("Tunnel already exist for this tunnel_id = %d, netdev = %s\n",
+				tunnel_id, tunnels[tunnel_id]->gre_dev->name);
+		return NSS_EOGRE_ERR_TUNNEL_ID_EXIST;
+	}
+
+	/*
+	 * Create GRE standard interface
+	 */
+	gre_dev = nss_connmgr_gre_create_interface(cfg, &err_code_gre);
+	if (!gre_dev) {
+		nss_eogremgr_warn("Could not create GRE interface, err=%d\n", err_code_gre);
+		return NSS_EOGRE_ERR_GRE_CREATE_FAILED;
+	}
+
+	nss_eogremgr_info("GRE interface %s created with tunnel_id: %u\n", gre_dev->name, tunnel_id);
+
+	gre_outer_ifnum = nss_cmn_get_interface_number_by_dev_and_type(
+			gre_dev, NSS_DYNAMIC_INTERFACE_TYPE_GRE_OUTER);
+	if (gre_outer_ifnum < 0) {
+		nss_eogremgr_warn("No GRE outer interface registered for gre netdev: %s\n", gre_dev->name);
+		nss_connmgr_gre_destroy_interface(gre_dev);
+		return NSS_EOGRE_ERR_INVALID_GRE_IFNUM;
+	}
+
+	nss_ctx = nss_ipv4_get_mgr();
+	if (!nss_ctx) {
+		nss_eogremgr_warn("EoGRE couldn't get IPv4 ctx\n");
+		nss_connmgr_gre_destroy_interface(gre_dev);
+		return NSS_EOGRE_ERR_BAD_NSS_CTX;
+	}
+
+	/*
+	 * Configure ifnum for GRE decap node before adding IPv4 rule.
+	 */
+	nircm->conn_rule.flow_interface_num = gre_outer_ifnum;
+
+	/*
+	 * Fill the flow/return ident with 32 bit key, to create a 5-tuple match for GRE with key
+	 */
+	if (cfg->okey_valid) {
+		nircm->tuple.flow_ident = cfg->okey;
+		nircm->tuple.return_ident = cfg->okey;
+		nircm->conn_rule.flow_ident_xlate = cfg->okey;
+		nircm->conn_rule.return_ident_xlate = cfg->okey;
+	}
+
+	/*
+	 * Push IPv4 rule to NSS.
+	 */
+	status = nss_eogremgr_tunnel_create_ipv4_rule(nss_ctx, nircm);
+	if (status != NSS_TX_SUCCESS) {
+		nss_eogremgr_warn("IPv4 rule create failed with status: %d\n", status);
+		nss_connmgr_gre_destroy_interface(gre_dev);
+		return NSS_EOGRE_ERR_CREATE_IP_RULE_FAILED;
+	}
+
+	nss_eogremgr_info("EoGRE Tunnel create done tunnel_id:%d IPv4: %pI4h :%u, %pI4h :%u, p: %d\n",
+			tunnel_id, &nircm->tuple.flow_ip, nircm->tuple.flow_ident, &nircm->tuple.return_ip,
+			nircm->tuple.return_ident, nircm->tuple.protocol);
+
+	/*
+	 * Add EoGRE tunnel entry
+	 */
+	tunnels[tunnel_id] = kzalloc(sizeof(struct nss_eogremgr_tunnel), GFP_ATOMIC);
+	if (!tunnels[tunnel_id]) {
+		nss_eogremgr_warn("Failed to allocate tunnel memory for tunnel_id : %d\n", tunnel_id);
+		nss_eogremgr_tunnel_destroy_ipv4_rule(nss_ctx, &nircm->tuple);
+		nss_connmgr_gre_destroy_interface(gre_dev);
+		return NSS_EOGRE_ERR_ALLOC_TUNNEL_FAILED;
+	}
+
+	memcpy(&tunnels[tunnel_id]->ip_rule.v4, nircm, sizeof(struct nss_ipv4_rule_create_msg));
+	tunnels[tunnel_id]->is_ipv4 = true;
+	tunnels[tunnel_id]->gre_dev = gre_dev;
+
+	return NSS_EOGRE_SUCCESS;
+}
+EXPORT_SYMBOL(nss_eogremgr_tunnel_ipv4_create);
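+
+/*
+ * Sketch of a typical caller flow (illustrative only; the GRE and IPv4
+ * rule structures are declared in nss_connmgr_gre_public.h and
+ * nss_api_if.h and must be filled in by the caller):
+ *
+ *	struct nss_connmgr_gre_cfg cfg = { ... };
+ *	struct nss_ipv4_rule_create_msg nircm = { ... };
+ *
+ *	if (nss_eogremgr_tunnel_ipv4_create(&cfg, &nircm, tunnel_id) ==
+ *	    NSS_EOGRE_SUCCESS)
+ *		nss_eogremgr_tunnel_enable(tunnel_id);
+ */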
+
+/*
+ * nss_eogremgr_destroy_tunnel_data()
+ * 	Destroy EoGRE global tunnel structure
+ */
+static int nss_eogremgr_destroy_tunnel_data(void)
+{
+	int tunnel_id, status, fail_count = 0;
+
+	for (tunnel_id = 0; tunnel_id < NSS_EOGREMGR_MAX_TUNNELS; tunnel_id++) {
+		if (!tunnels[tunnel_id]) {
+			continue;
+		}
+
+		status = nss_eogremgr_tunnel_destroy(tunnel_id);
+		if (status != NSS_EOGRE_SUCCESS) {
+			nss_eogremgr_warn("Unable to delete GRE netdev and IPv4/IPv6 rule for tunnel_id: %u\n", tunnel_id);
+			fail_count++;
+		}
+	}
+
+	return fail_count;
+}
+
+/*
+ * nss_eogremgr_exit_module()
+ * 	Tunnel EoGRE module exit function
+ */
+static void __exit nss_eogremgr_exit_module(void)
+{
+	int status;
+
+	/*
+	 * Destroy tunnel data
+	 */
+	status = nss_eogremgr_destroy_tunnel_data();
+	if (status) {
+		nss_eogremgr_info("Error in module unloading, Unable to delete %d tunnels \n", status);
+		return;
+	}
+
+	nss_eogremgr_info("module unloaded\n");
+}
+
+/*
+ * nss_eogremgr_init_module()
+ * 	Tunnel EoGRE module init function
+ */
+static int __init nss_eogremgr_init_module(void)
+{
+#ifdef CONFIG_OF
+	/*
+	 * If the node is not compatible, don't do anything.
+	 */
+	if (!of_find_node_by_name(NULL, "nss-common")) {
+		return 0;
+	}
+#endif
+
+	nss_eogremgr_info("module %s loaded\n", NSS_CLIENT_BUILD_ID);
+
+	return 0;
+}
+module_init(nss_eogremgr_init_module);
+module_exit(nss_eogremgr_exit_module);
+
+MODULE_LICENSE("Dual BSD/GPL");
+MODULE_DESCRIPTION("NSS EOGRE manager");
diff --git a/qca-nss-clients/eogremgr/nss_eogremgr_priv.h b/qca-nss-clients/eogremgr/nss_eogremgr_priv.h
new file mode 100644
index 0000000..c9824ec
--- /dev/null
+++ b/qca-nss-clients/eogremgr/nss_eogremgr_priv.h
@@ -0,0 +1,63 @@
+/*
+ **************************************************************************
+ * Copyright (c) 2019, The Linux Foundation. All rights reserved.
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all copies.
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ **************************************************************************
+ */
+
+/*
+ * nss_eogremgr_priv.h
+ *	EoGRE manager private defines
+ */
+#include <linux/types.h>
+#include <linux/module.h>
+#include <linux/skbuff.h>
+#include <nss_api_if.h>
+#include <linux/in.h>
+#include <linux/etherdevice.h>
+#include <nss_cmn.h>
+
+#if (NSS_EOGREMGR_DEBUG_LEVEL < 1)
+#define nss_eogremgr_assert(fmt, args...)
+#else
+#define nss_eogremgr_assert(c) if (!(c)) { BUG_ON(!(c)); }
+#endif /* NSS_EOGREMGR_DEBUG_LEVEL */
+
+/*
+ * Compile messages for dynamic enable/disable
+ */
+#if defined(CONFIG_DYNAMIC_DEBUG)
+#define nss_eogremgr_warn(s, ...) pr_debug("%s[%d]:" s, __FUNCTION__, __LINE__, ##__VA_ARGS__)
+#define nss_eogremgr_info(s, ...) pr_debug("%s[%d]:" s, __FUNCTION__, __LINE__, ##__VA_ARGS__)
+#define nss_eogremgr_trace(s, ...) pr_debug("%s[%d]:" s, __FUNCTION__, __LINE__, ##__VA_ARGS__)
+#else /* CONFIG_DYNAMIC_DEBUG */
+/*
+ * Statically compile messages at different levels
+ */
+#if (NSS_EOGREMGR_DEBUG_LEVEL < 2)
+#define nss_eogremgr_warn(s, ...)
+#else
+#define nss_eogremgr_warn(s, ...) pr_warn("%s[%d]:" s, __FUNCTION__, __LINE__, ##__VA_ARGS__)
+#endif
+
+#if (NSS_EOGREMGR_DEBUG_LEVEL < 3)
+#define nss_eogremgr_info(s, ...)
+#else
+#define nss_eogremgr_info(s, ...)   pr_notice("%s[%d]:" s, __FUNCTION__, __LINE__, ##__VA_ARGS__)
+#endif
+
+#if (NSS_EOGREMGR_DEBUG_LEVEL < 4)
+#define nss_eogremgr_trace(s, ...)
+#else
+#define nss_eogremgr_trace(s, ...)  pr_info("%s[%d]:" s, __FUNCTION__, __LINE__, ##__VA_ARGS__)
+#endif
+#endif /* CONFIG_DYNAMIC_DEBUG */
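+
+/*
+ * With CONFIG_DYNAMIC_DEBUG enabled, all of the messages above compile to
+ * pr_debug() and can be switched on at runtime through the standard
+ * dynamic-debug control file, for example:
+ *
+ *	echo 'module qca-nss-eogremgr +p' > /sys/kernel/debug/dynamic_debug/control
+ */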
diff --git a/qca-nss-clients/exports/nss_capwap_user.h b/qca-nss-clients/exports/nss_capwap_user.h
new file mode 100644
index 0000000..ba4a684
--- /dev/null
+++ b/qca-nss-clients/exports/nss_capwap_user.h
@@ -0,0 +1,115 @@
+/*
+ **************************************************************************
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all copies.
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ **************************************************************************
+ */
+
+/**
+ * nss_capwap_user.h
+ *	NSS CAPWAP definitions for kernel and user space
+ */
+
+#ifndef __NSS_CAPWAP_USER_H
+#define __NSS_CAPWAP_USER_H
+
+/**
+ * Maximum values for rule configuration parameters
+ */
+#define NSS_CAPWAP_MAX_MTU			9000
+				/**< maximum MTU supported by NSS FW */
+#define NSS_CAPWAP_MAX_BUFFER_SIZE		9000
+				/**< maximum buffer-size supported by NSS FW */
+#define NSS_CAPWAP_MAX_FRAGMENTS		10
+				/**< maximum fragments for reassembly */
+#define NSS_CAPWAP_MAX_REASSEMBLY_TIMEOUT	(10 * 1000)
+				/**< maximum timeout for reassembly - 10 seconds */
+
+/**
+ * CAPWAP Rule configure message flags
+ */
+#define NSS_CAPWAP_TUNNEL_IPV4		2
+				/**< IPv4 tunnel */
+#define NSS_CAPWAP_TUNNEL_IPV6		3
+				/**< IPv6 tunnel */
+#define NSS_CAPWAP_TUNNEL_UDP		4
+				/**< UDP tunnel */
+#define NSS_CAPWAP_TUNNEL_UDPLite	5
+				/**< UDPLite tunnel */
+
+/**
+ * CAPWAP tunnel create and type flags. These flags are used
+ * to determine packet header size during encapsulation.
+ */
+#define NSS_CAPWAP_RULE_CREATE_VLAN_CONFIGURED	0x1
+				/**< VLAN Configured for CAPWAP tunnel */
+#define NSS_CAPWAP_RULE_CREATE_PPPOE_CONFIGURED	0x2
+				/**< PPPoE configured for CAPWAP tunnel */
+#define NSS_CAPWAP_ENCAP_UDPLITE_HDR_CSUM	0x4
+				/**< Generate only UDP-Lite header checksum. Otherwise whole UDP-Lite payload */
+
+/**
+ * CAPWAP version
+ */
+#define NSS_CAPWAP_VERSION_V1		0x1
+				/**< RFC CAPWAP version */
+#define NSS_CAPWAP_VERSION_V2		0x2
+				/**< Initial CAPWAP version for a customer */
+
+/**
+ * Type of packet. These are mutually exclusive fields.
+ */
+#define NSS_CAPWAP_PKT_TYPE_UNKNOWN	0x0000
+				/**< Don't know the type of CAPWAP packet */
+#define NSS_CAPWAP_PKT_TYPE_CONTROL	0x0001
+				/**< It's a control CAPWAP packet, src_port=5246 (RFC 5415) */
+#define NSS_CAPWAP_PKT_TYPE_DATA	0x0002
+				/**< It's a data CAPWAP packet, src_port=5247 (RFC 5415) */
+
+/**
+ * Addtional fields for identifying what's there in the packet.
+ */
+#define NSS_CAPWAP_PKT_TYPE_DTLS_ENABLED	0x0004
+				/**< It's a DTLS packet. */
+#define NSS_CAPWAP_PKT_TYPE_WIRELESS_INFO	0x0008
+				/**< W=1, wireless info present */
+#define NSS_CAPWAP_PKT_TYPE_802_11		0x0010
+				/**< T=1, then set wbid=1 */
+#define NSS_CAPWAP_PKT_TYPE_802_3		0x0020
+				/**< Data is in 802.3 format */
+
+/**
+ * CAPWAP metaheader per-packet for both encap (TX) and decap (RX).
+ */
+struct nss_capwap_metaheader {
+	uint8_t version;	/**< CAPWAP version */
+	uint8_t rid;		/**< Radio ID */
+	uint16_t tunnel_id;	/**< Tunnel-ID */
+	uint8_t dscp;		/**< DSCP value */
+	uint8_t vlan_pcp;	/**< VLAN priority (802.1p) marking */
+	uint16_t type;		/**< Type of CAPWAP packet and what was present in the CAPWAP header */
+	uint16_t nwireless;	/**< Number of wireless info sections in CAPWAP header */
+	uint16_t wireless_qos;	/**< 802.11e qos info */
+	uint16_t outer_sgt;	/**< Security Group Tag value in the TrustSec header */
+	uint16_t inner_sgt;	/**< Security Group Tag value in the TrustSec header */
+	uint32_t flow_id;	/**< Flow identification */
+	uint16_t vapid;		/**< VAP ID info */
+
+	uint16_t magic;		/**< Magic for verification purpose. Use only for debugging */
+
+	/*
+	 * Put the wl_info last so we don't have to copy it when no
+	 * 802.11 to 802.3 conversion has happened.
+	 */
+	uint8_t wl_info[8];	/**< Wireless info preserved from the original packet */
+} __packed __aligned(4);
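+
+/*
+ * With the layout above, and because the structure is __packed, the
+ * metaheader is exactly 32 bytes
+ * (1 + 1 + 2 + 1 + 1 + 2 + 2 + 2 + 2 + 2 + 4 + 2 + 2 + 8), which already
+ * satisfies the 4-byte alignment requested by __aligned(4).
+ */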
+
+#endif /* __NSS_CAPWAP_USER_H */
diff --git a/qca-nss-clients/exports/nss_capwapmgr.h b/qca-nss-clients/exports/nss_capwapmgr.h
new file mode 100644
index 0000000..5833d35
--- /dev/null
+++ b/qca-nss-clients/exports/nss_capwapmgr.h
@@ -0,0 +1,413 @@
+/*
+ **************************************************************************
+ * Copyright (c) 2014-2020, The Linux Foundation. All rights reserved.
+ * Permission to use, copy, modify, and/or distribute this software for
+ * any purpose with or without fee is hereby granted, provided that the
+ * above copyright notice and this permission notice appear in all copies.
+ * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
+ * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
+ * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
+ * ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
+ * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
+ * ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
+ * OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
+ **************************************************************************
+ */
+
+/**
+ * nss_capwapmgr.h
+ *	CAPWAP manager for NSS
+ */
+#ifndef __NSS_CAPWAPMGR_H
+#define __NSS_CAPWAPMGR_H
+
+#include <nss_dtlsmgr.h>
+
+/*
+ * Maximum number of tunnels currently supported
+ */
+#define NSS_CAPWAPMGR_MAX_TUNNELS		32
+
+#define NSS_CAPWAPMGR_TUNNEL_STATE_CONFIGURED		0x1
+					/**< Bit is set if tunnel has been configured */
+#define NSS_CAPWAPMGR_TUNNEL_STATE_ENABLED		0x2
+					/**< Bit is set if tunnel has been enabled */
+#define NSS_CAPWAPMGR_TUNNEL_STATE_IPRULE_CONFIGURED	0x4
+					/**< Bit is set if tunnel IP rule exist */
+
+/*
+ * Tunnel feature flags
+ */
+#define NSS_CAPWAPMGR_FEATURE_DTLS_ENABLED		0x00000001	/* Tunnel enabled DTLS. */
+#define NSS_CAPWAPMGR_FEATURE_INNER_TRUSTSEC_ENABLED	0x00000002	/* Tunnel enabled inner trustsec. */
+#define NSS_CAPWAPMGR_FEATURE_OUTER_TRUSTSEC_ENABLED	0x00000004	/* Tunnel enabled outer trustsec. */
+#define NSS_CAPWAPMGR_FEATURE_WIRELESS_QOS_ENABLED	0x00000008	/* Tunnel enabled wireless QoS. */
+
+/*
+ * All CAPWAP messages to the NSS FW are synchronous in nature: we have
+ * to wait for an ACK/NACK from the NSS FW before proceeding further.
+ * Keep an NSS FW response table to wake up the sync message caller.
+ */
+struct nss_capwapmgr_response {
+	struct semaphore sem;
+	wait_queue_head_t wq;
+	enum nss_cmn_response response;
+	nss_capwap_msg_response_t error;
+	atomic_t seq;
+};
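+
+/*
+ * A plausible sync-message sequence using this bookkeeping (a sketch, not
+ * the definitive implementation): the sender takes resp->sem, records the
+ * expected sequence number in resp->seq, transmits the message to the NSS
+ * FW, and sleeps on resp->wq until the response callback fills in
+ * resp->response / resp->error and wakes it up.
+ */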
+
+/**
+ * Mapping table from tunnel-id to if_num and rule.
+ */
+struct nss_capwapmgr_tunnel {
+	struct net_device *dtls_dev;		/**< DTLS netdevice */
+	uint32_t if_num_inner;			/**< Interface number of the INNER CAPWAP node */
+	uint32_t if_num_outer;			/**< Interface number of the OUTER CAPWAP node */
+	uint32_t tunnel_state;			/**< Tunnel state */
+	uint16_t type_flags;			/**< Tunnel type to determine header size */
+	union {
+		struct nss_ipv4_create v4;	/**< IPv4 rule structure */
+		struct nss_ipv6_create v6;	/**< IPv6 rule structure */
+	} ip_rule;
+	struct nss_capwap_rule_msg capwap_rule;	/**< Copy of CAPWAP rule */
+};
+
+/**
+ * Private structure to store information needed by a nss_capwap net_device
+ */
+struct nss_capwapmgr_priv {
+	struct nss_ctx_instance *nss_ctx;	/**< Pointer to NSS context */
+	struct nss_capwapmgr_tunnel *tunnel;	/**< Pointer to tunnel data */
+	uint8_t *if_num_to_tunnel_id;		/**< Mapping table from if_num to tunnel_id. */
+	struct nss_capwapmgr_response *resp;	/**< Response housekeeping */
+};
+
+/**
+ * CAPWAP status enums
+ */
+typedef enum {
+	/*
+	 * nss_tx_status_t enums
+	 */
+	NSS_CAPWAPMGR_SUCCESS = NSS_TX_SUCCESS,
+	NSS_CAPWAPMGR_FAILURE = NSS_TX_FAILURE,
+	NSS_CAPWAPMGR_FAILURE_QUEUE = NSS_TX_FAILURE_QUEUE,
+	NSS_CAPWAPMGR_FAILURE_NOT_READY = NSS_TX_FAILURE_NOT_READY,
+	NSS_CAPWAPMGR_FAILURE_TOO_LARGE = NSS_TX_FAILURE_TOO_LARGE,
+	NSS_CAPWAPMGR_FAILURE_TOO_SHORT = NSS_TX_FAILURE_TOO_SHORT,
+	NSS_CAPWAPMGR_FAILURE_NOT_SUPPORTED = NSS_TX_FAILURE_NOT_SUPPORTED,
+	NSS_CAPWAPMGR_FAILURE_BAD_PARAM = NSS_TX_FAILURE_BAD_PARAM,
+
+	/*
+	 * CAPWAP specific ones.
+	 */
+	NSS_CAPWAPMGR_FAILURE_TUNNEL_ENABLED = 100,	/**< Tunnel is enabled */
+	NSS_CAPWAP