/* blob: 52bd11bc36661e4fff32287f6b509f4df1677d8e [file] [log] [blame] */
/*
**************************************************************************
* Copyright (c) 2014-2021 The Linux Foundation. All rights reserved.
* Copyright (c) 2022 Qualcomm Innovation Center, Inc. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for
* any purpose with or without fee is hereby granted, provided that the
* above copyright notice and this permission notice appear in all copies.
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT
* OF OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
**************************************************************************
*/
#include <linux/version.h>
#include <linux/types.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/module.h>
#include <linux/skbuff.h>
#include <linux/icmp.h>
#include <linux/kthread.h>
#include <linux/pkt_sched.h>
#include <linux/string.h>
#include <net/ip6_route.h>
#include <net/ip6_fib.h>
#include <net/ipv6.h>
#include <net/route.h>
#include <net/ip_fib.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/addrconf.h>
#include <asm/unaligned.h>
#include <asm/uaccess.h> /* for put_user */
#include <linux/inet.h>
#include <linux/in6.h>
#include <linux/in.h>
#include <linux/udp.h>
#include <linux/tcp.h>
#include <linux/kernel.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/socket.h>
#include <linux/wireless.h>
#include <net/gre.h>
#if defined(ECM_DB_XREF_ENABLE) && defined(ECM_BAND_STEERING_ENABLE)
#include <linux/if_bridge.h>
#endif
#include <linux/inetdevice.h>
#if defined(ECM_INTERFACE_TUNIPIP6_ENABLE) || defined(ECM_INTERFACE_SIT_ENABLE)
#include <net/ip_tunnels.h>
#endif
#include <net/ip6_tunnel.h>
#include <net/addrconf.h>
#include <linux/if_arp.h>
#include <linux/netfilter_ipv4.h>
#include <linux/netfilter_bridge.h>
#include <linux/if_bridge.h>
#include <net/arp.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_acct.h>
#include <net/netfilter/nf_conntrack_helper.h>
#include <net/netfilter/nf_conntrack_l4proto.h>
#include <net/netfilter/nf_conntrack_zones.h>
#include <net/netfilter/nf_conntrack_core.h>
#include <linux/netfilter_ipv6/ip6_tables.h>
#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
#include <net/netfilter/ipv4/nf_defrag_ipv4.h>
#ifdef ECM_INTERFACE_VLAN_ENABLE
#include <linux/../../net/8021q/vlan.h>
#include <linux/if_vlan.h>
#endif
#ifdef ECM_INTERFACE_PPP_ENABLE
#include <linux/if_pppox.h>
#ifdef ECM_INTERFACE_L2TPV2_ENABLE
#include <linux/if_pppol2tp.h>
#endif
#ifdef ECM_INTERFACE_PPTP_ENABLE
#include <linux/netfilter/nf_conntrack_proto_gre.h>
#endif
#endif
#ifdef ECM_INTERFACE_MAP_T_ENABLE
#include <nat46-core.h>
#endif
#ifdef ECM_INTERFACE_VXLAN_ENABLE
#include <net/vxlan.h>
#endif
#ifdef ECM_INTERFACE_OVS_BRIDGE_ENABLE
#include <ovsmgr.h>
#endif
#ifdef ECM_INTERFACE_MACVLAN_ENABLE
#include <linux/if_macvlan.h>
#endif
/*
* Debug output levels
* 0 = OFF
* 1 = ASSERTS / ERRORS
* 2 = 1 + WARN
* 3 = 2 + INFO
* 4 = 3 + TRACE
*/
#define DEBUG_LEVEL ECM_INTERFACE_DEBUG_LEVEL
#ifdef ECM_MULTICAST_ENABLE
#include <mc_ecm.h>
#endif
#include "ecm_types.h"
#include "ecm_db_types.h"
#include "ecm_state.h"
#include "ecm_tracker.h"
#include "ecm_classifier.h"
#include "ecm_front_end_types.h"
#include "ecm_tracker_datagram.h"
#include "ecm_tracker_udp.h"
#include "ecm_tracker_tcp.h"
#include "ecm_db.h"
#include "ecm_front_end_ipv4.h"
#ifdef ECM_IPV6_ENABLE
#include "ecm_front_end_ipv6.h"
#endif
#include "ecm_interface.h"
#include "exports/ecm_interface_ipsec.h"
#ifdef ECM_INTERFACE_OVPN_ENABLE
#include "ecm_interface_ovpn.h"
#endif
/*
* Wifi event handler structure.
*/
struct ecm_interface_wifi_event {
	struct task_struct *thread;	/* Kernel thread servicing wifi events */
	struct socket *sock;		/* Socket the thread receives events on */
};

/* Singleton wifi event handler state for this module */
static struct ecm_interface_wifi_event __ewn;
#ifdef ECM_INTERFACE_IPSEC_GLUE_LAYER_SUPPORT_ENABLE
/*
* Get ipsecmgr tunnel netdevice method
*/
static struct ecm_interface_ipsec_callback ecm_interface_ipsec_cb;
#endif
/*
 * Locking - concurrency control
 */
static DEFINE_SPINLOCK(ecm_interface_lock); /* Protect against SMP access between netfilter, events and private threaded function. */

/*
 * Management thread control
 */
static bool ecm_interface_terminate_pending = false; /* True when the user has signalled we should quit */

/*
 * Source interface check flag.
 * If it is enabled, the acceleration engine will check the flow's interface to see
 * whether it matches with the rule's source interface or not.
 */
int ecm_interface_src_check;

#if defined(CONFIG_NET_CLS_ACT) && defined(ECM_CLASSIFIER_DSCP_IGS)
/*
 * IGS enabled flag.
 * If it is enabled, the acceleration engine will deny the acceleration for the new
 * connection, if the egress interface has ingress qdisc enabled over it.
 */
int ecm_interface_igs_enabled;
#endif

static struct ctl_table_header *ecm_interface_ctl_table_header; /* Sysctl table header */
#ifdef ECM_INTERFACE_OVPN_ENABLE
/*
* Callback structure to support OVPN offload.
*/
static struct ecm_interface_ovpn ovpn;
/*
* ecm_interface_ovpn_register
*/
int ecm_interface_ovpn_register(struct ecm_interface_ovpn *ovpn_cb)
{
	int ret = 0;

	/*
	 * Install both callbacks atomically under the lock; refuse a second
	 * registration while one is already in place.
	 */
	spin_lock_bh(&ecm_interface_lock);
	if (!ovpn.ovpn_update_route) {
		ovpn.ovpn_update_route = ovpn_cb->ovpn_update_route;
		ovpn.ovpn_get_ifnum = ovpn_cb->ovpn_get_ifnum;
	} else {
		ret = -1;
	}
	spin_unlock_bh(&ecm_interface_lock);

	if (ret) {
		DEBUG_ERROR("OVPN callbacks are registered\n");
	}
	return ret;
}
EXPORT_SYMBOL(ecm_interface_ovpn_register);
/*
* ecm_interface_ovpn_unregister
*/
void ecm_interface_ovpn_unregister(void)
{
	/*
	 * Clear both OVPN callbacks under the lock so concurrent callers
	 * never race with a partially-cleared callback table.
	 */
	spin_lock_bh(&ecm_interface_lock);
	ovpn.ovpn_get_ifnum = NULL;
	ovpn.ovpn_update_route = NULL;
	spin_unlock_bh(&ecm_interface_lock);
}
EXPORT_SYMBOL(ecm_interface_ovpn_unregister);
/*
* ecm_interface_ovpn_get_ifnum
*/
static int ecm_interface_ovpn_get_ifnum(struct net_device *dev, struct sk_buff *skb, struct net_device **tun_dev)
{
	int ifnum = -1;

	DEBUG_TRACE("Calling registered function to get OVPN ifnum.\n");

	/*
	 * Invoke the registered callback under the lock; -1 is returned when
	 * no OVPN module has registered a callback.
	 */
	spin_lock_bh(&ecm_interface_lock);
	if (unlikely(!ovpn.ovpn_get_ifnum)) {
		spin_unlock_bh(&ecm_interface_lock);
		return ifnum;
	}
	ifnum = ovpn.ovpn_get_ifnum(dev, skb, tun_dev);
	spin_unlock_bh(&ecm_interface_lock);
	return ifnum;
}
/*
* ecm_interface_ovpn_update_route
*/
static void ecm_interface_ovpn_update_route(struct net_device *dev, uint32_t *from_addr, uint32_t *to_addr, int version)
{
	DEBUG_TRACE("Calling registered function to update OVPN route entry.\n");

	/*
	 * Forward to the registered OVPN route-update callback, if any.
	 */
	spin_lock_bh(&ecm_interface_lock);
	if (unlikely(!ovpn.ovpn_update_route)) {
		spin_unlock_bh(&ecm_interface_lock);
		return;
	}
	ovpn.ovpn_update_route(dev, from_addr, to_addr, version);
	spin_unlock_bh(&ecm_interface_lock);
}
#endif
/*
* ecm_interface_get_and_hold_ipsec_tun_netdev()
* Returns the nss tunnel interface net_dev
*/
struct net_device *ecm_interface_get_and_hold_ipsec_tun_netdev(struct net_device *dev, struct sk_buff *skb, int32_t *interface_type)
{
	struct net_device *ipsec_dev = NULL;
#ifdef ECM_INTERFACE_IPSEC_GLUE_LAYER_SUPPORT_ENABLE
	spin_lock_bh(&ecm_interface_lock);
	if (!ecm_interface_ipsec_cb.tunnel_get_and_hold) {
		spin_unlock_bh(&ecm_interface_lock);
		DEBUG_WARN("IPSec glue module is not loaded yet\n");
		return NULL;
	}
	/*
	 * NOTE(review): the glue callback runs with the interface spinlock
	 * held (BH disabled), so it must not sleep — confirm against the
	 * ipsecmgr glue implementation.
	 */
	ipsec_dev = ecm_interface_ipsec_cb.tunnel_get_and_hold(dev, skb, interface_type);
	spin_unlock_bh(&ecm_interface_lock);
#endif
	/* NULL when the glue layer is compiled out or not yet registered */
	return ipsec_dev;
}
/*
* ecm_interface_get_and_hold_dev_master()
* Returns the master device of a net device if any.
*/
struct net_device *ecm_interface_get_and_hold_dev_master(struct net_device *dev)
{
	struct net_device *master;
#ifdef ECM_INTERFACE_OVS_BRIDGE_ENABLE
	/*
	 * OVS bridge ports are not linked via the normal upper-dev list,
	 * so query the OVS manager for the master instead.
	 */
	if (ecm_interface_is_ovs_bridge_port(dev)) {
		master = ovsmgr_dev_get_master(dev);
		if (!master) {
			return NULL;
		}
		dev_hold(master);
		return master;
	}
#endif
	/*
	 * Walk the upper-dev relationship under RCU and take a reference on
	 * the master before dropping the read lock so it cannot disappear.
	 */
	rcu_read_lock();
	master = netdev_master_upper_dev_get_rcu(dev);
	if (!master) {
		rcu_read_unlock();
		return NULL;
	}
	dev_hold(master);
	rcu_read_unlock();
	return master;
}
EXPORT_SYMBOL(ecm_interface_get_and_hold_dev_master);
/*
* ecm_interface_vlan_real_dev()
* Return immediate VLAN net device or Physical device pointer
*/
static inline struct net_device *ecm_interface_vlan_real_dev(struct net_device *vlan_dev)
{
	/* One level down the VLAN stack; no reference is taken here */
	return vlan_dev_next_dev(vlan_dev);
}
/*
* ecm_interface_dev_find_by_local_addr_ipv4()
* Return a hold to the device for the given local IP address. Returns NULL on failure.
*/
static struct net_device *ecm_interface_dev_find_by_local_addr_ipv4(ip_addr_t addr)
{
	__be32 nbo_addr;

	/*
	 * Convert to network byte order and let the kernel locate the device
	 * owning this local address; ip_dev_find() returns a held reference.
	 */
	ECM_IP_ADDR_TO_NIN4_ADDR(nbo_addr, addr);
	return ip_dev_find(&init_net, nbo_addr);
}
#ifdef ECM_IPV6_ENABLE
/*
* ecm_interface_dev_find_by_local_addr_ipv6()
* Return a hold to the device for the given local IP address. Returns NULL on failure.
*/
static struct net_device *ecm_interface_dev_find_by_local_addr_ipv6(ip_addr_t addr)
{
	struct in6_addr in6;

	/*
	 * Convert to the Linux in6_addr form and locate the owning device;
	 * the returned device (if any) carries a reference.
	 */
	ECM_IP_ADDR_TO_NIN6_ADDR(in6, addr);
	return (struct net_device *)ipv6_dev_find(&init_net, &in6, 1);
}
#endif
/*
* ecm_interface_dev_find_by_local_addr()
* Return the device on which the local address resides.
*
* Returns a hold to the device or NULL on failure.
*/
struct net_device *ecm_interface_dev_find_by_local_addr(ip_addr_t addr)
{
	char __attribute__((unused)) addr_str[40];

	DEBUG_ECM_IP_ADDR_TO_STRING(addr_str, addr);
	DEBUG_TRACE("Locate dev for: %s\n", addr_str);

	/*
	 * Dispatch on address family; IPv6 lookups are only available when
	 * IPv6 support is compiled in.
	 */
#ifdef ECM_IPV6_ENABLE
	if (!ECM_IP_ADDR_IS_V4(addr)) {
		return ecm_interface_dev_find_by_local_addr_ipv6(addr);
	}
#else
	if (!ECM_IP_ADDR_IS_V4(addr)) {
		return NULL;
	}
#endif
	return ecm_interface_dev_find_by_local_addr_ipv4(addr);
}
EXPORT_SYMBOL(ecm_interface_dev_find_by_local_addr);
/*
* ecm_interface_dev_find_by_addr()
 * Return the net device on which the given IP address resides. Returns NULL on failure.
*
* NOTE: The device may be the device upon which has a default gateway to reach the address.
* from_local_addr is true when the device was found by a local address search.
*/
struct net_device *ecm_interface_dev_find_by_addr(ip_addr_t addr, bool *from_local_addr)
{
	char __attribute__((unused)) addr_str[40];
	struct ecm_interface_route ecm_rt;
	struct net_device *dev;
	struct dst_entry *dst;

	DEBUG_ECM_IP_ADDR_TO_STRING(addr_str, addr);

	/*
	 * Is the address a local IP?
	 */
	DEBUG_TRACE("find net device for address: %s\n", addr_str);
	dev = ecm_interface_dev_find_by_local_addr(addr);
	if (dev) {
		/* Local address: device reference already held by the lookup */
		*from_local_addr = true;
		DEBUG_TRACE("addr: %s is local: %px (%s)\n", addr_str, dev, dev->name);
		return dev;
	}

	DEBUG_TRACE("addr: %s is not local\n", addr_str);

	/*
	 * Try a route to the address instead
	 * NOTE: This will locate a route entry in the route destination *cache*.
	 */
	if (!ecm_interface_find_route_by_addr(addr, &ecm_rt)) {
		DEBUG_WARN("addr: %s - no dev locatable\n", addr_str);
		return NULL;
	}

	*from_local_addr = false;
	dst = ecm_rt.dst;
	dev = dst->dev;
	/* Hold the device before releasing the route that pins it */
	dev_hold(dev);
	ecm_interface_route_release(&ecm_rt);
	DEBUG_TRACE("dest_addr: %s uses dev: %px(%s)\n", addr_str, dev, dev->name);
	return dev;
}
EXPORT_SYMBOL(ecm_interface_dev_find_by_addr);
#ifdef ECM_IPV6_ENABLE
/*
* ecm_interface_mac_addr_get_ipv6()
* Return mac for an IPv6 address
*
* GGG TODO Need to make sure this also works for local IP addresses too.
*/
static bool ecm_interface_mac_addr_get_ipv6(ip_addr_t addr, uint8_t *mac_addr, bool *on_link, ip_addr_t gw_addr)
{
	struct in6_addr daddr;
	struct ecm_interface_route ecm_rt;
	struct neighbour *neigh;
	struct rt6_info *rt;
	struct dst_entry *dst;

	/*
	 * Get the MAC address that corresponds to IP address given.
	 * We look up the rt6_info entries and, from its neighbour structure, obtain the hardware address.
	 * This means we will also work if the neighbours are routers too.
	 */
	ECM_IP_ADDR_TO_NIN6_ADDR(daddr, addr);
	if (!ecm_interface_find_route_by_addr(addr, &ecm_rt)) {
		*on_link = false;
		return false;
	}
	DEBUG_ASSERT(!ecm_rt.v4_route, "Did not locate a v6 route!\n");

	/*
	 * Is this destination on link or off-link via a gateway?
	 * Off-link when the route destination differs from its gateway, or the
	 * route carries RTF_GATEWAY; gw_addr is then filled in for the caller.
	 */
	rt = ecm_rt.rt.rtv6;
	if (!ECM_IP_ADDR_MATCH(rt->rt6i_dst.addr.in6_u.u6_addr32, rt->rt6i_gateway.in6_u.u6_addr32) || (rt->rt6i_flags & RTF_GATEWAY)) {
		*on_link = false;
		ECM_NIN6_ADDR_TO_IP_ADDR(gw_addr, rt->rt6i_gateway)
	} else {
		*on_link = true;
	}

	/*
	 * Neighbour lookup: prefer the dst-attached lookup, fall back to a
	 * direct ND table lookup on the route's device.
	 */
	rcu_read_lock();
	dst = ecm_rt.dst;
	neigh = dst_neigh_lookup(dst, &daddr);
	if (!neigh) {
		neigh = neigh_lookup(&nd_tbl, &daddr, dst->dev);
	}
	if (!neigh) {
		rcu_read_unlock();
		ecm_interface_route_release(&ecm_rt);
		DEBUG_WARN("No neigh reference\n");
		return false;
	}
	/* Only a NUD_VALID neighbour has a usable hardware address */
	if (!(neigh->nud_state & NUD_VALID)) {
		rcu_read_unlock();
		neigh_release(neigh);
		ecm_interface_route_release(&ecm_rt);
		DEBUG_WARN("NUD invalid\n");
		return false;
	}
	if (!neigh->dev) {
		rcu_read_unlock();
		neigh_release(neigh);
		ecm_interface_route_release(&ecm_rt);
		DEBUG_WARN("Neigh dev invalid\n");
		return false;
	}

	/*
	 * If neigh->dev is a loopback then addr is a local address in which case we take the MAC from given device
	 */
	if (neigh->dev->flags & IFF_LOOPBACK) {
		// GGG TODO Create an equivalent logic to that for ipv4, maybe need to create an ip6_dev_find()?
		DEBUG_TRACE("local address " ECM_IP_ADDR_OCTAL_FMT " (found loopback)\n", ECM_IP_ADDR_TO_OCTAL(addr));
		/* Local address: report an all-zero MAC rather than failing */
		eth_zero_addr(mac_addr);
	} else {
		ether_addr_copy(mac_addr, neigh->ha);
	}
	rcu_read_unlock();
	neigh_release(neigh);
	ecm_interface_route_release(&ecm_rt);
	DEBUG_TRACE(ECM_IP_ADDR_OCTAL_FMT " maps to %pM\n", ECM_IP_ADDR_TO_OCTAL(addr), mac_addr);
	return true;
}
/*
* ecm_interface_find_gateway_ipv6()
* Finds the ipv6 gateway ip address of a given ipv6 address.
*/
static bool ecm_interface_find_gateway_ipv6(ip_addr_t addr, ip_addr_t gw_addr)
{
	struct ecm_interface_route ecm_rt;
	struct rt6_info *rt;

	/*
	 * Find the ipv6 route of the given ip address to look up
	 * whether we have a gateway to reach to that ip address or not.
	 */
	if (!ecm_interface_find_route_by_addr(addr, &ecm_rt)) {
		return false;
	}
	DEBUG_ASSERT(!ecm_rt.v4_route, "Did not locate a v6 route!\n");
	DEBUG_TRACE("Found route\n");

	/*
	 * Is this destination reachable via a gateway?
	 * On-link (destination equals gateway and no RTF_GATEWAY flag) means
	 * there is no gateway to report.
	 */
	rt = ecm_rt.rt.rtv6;
	if (ECM_IP_ADDR_MATCH(rt->rt6i_dst.addr.in6_u.u6_addr32, rt->rt6i_gateway.in6_u.u6_addr32) && !(rt->rt6i_flags & RTF_GATEWAY)) {
		ecm_interface_route_release(&ecm_rt);
		return false;
	}

	/* Copy the gateway out for the caller, then drop the route */
	ECM_NIN6_ADDR_TO_IP_ADDR(gw_addr, rt->rt6i_gateway)
	ecm_interface_route_release(&ecm_rt);
	return true;
}
#endif
/*
* ecm_interface_find_gateway_ipv4()
* Finds the ipv4 gateway address of a given ipv4 address.
*/
static bool ecm_interface_find_gateway_ipv4(ip_addr_t addr, ip_addr_t gw_addr)
{
	struct ecm_interface_route ecm_rt;
	struct rtable *rt;

	/*
	 * Find the ipv4 route of the given ip address to look up
	 * whether we have a gateway to reach to that ip address or not.
	 */
	if (!ecm_interface_find_route_by_addr(addr, &ecm_rt)) {
		return false;
	}
	DEBUG_ASSERT(ecm_rt.v4_route, "Did not locate a v4 route!\n");
	DEBUG_TRACE("Found route\n");

	/*
	 * Is this destination reachable via a gateway?
	 */
	rt = ecm_rt.rt.rtv4;
	if (!rt->rt_uses_gateway && !(rt->rt_flags & RTF_GATEWAY)) {
		ecm_interface_route_release(&ecm_rt);
		return false;
	}

	/*
	 * struct rtable's gateway field was renamed rt_gateway -> rt_gw4
	 * in kernel 5.2, hence the version split.
	 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0))
	ECM_NIN4_ADDR_TO_IP_ADDR(gw_addr, rt->rt_gateway)
#else
	ECM_NIN4_ADDR_TO_IP_ADDR(gw_addr, rt->rt_gw4)
#endif
	ecm_interface_route_release(&ecm_rt);
	return true;
}
/*
* ecm_interface_find_gateway()
* Finds the gateway ip address of a given ECM ip address type.
*/
bool ecm_interface_find_gateway(ip_addr_t addr, ip_addr_t gw_addr)
{
	/*
	 * Dispatch on address family; IPv6 is only supported when compiled in.
	 */
#ifdef ECM_IPV6_ENABLE
	if (!ECM_IP_ADDR_IS_V4(addr)) {
		return ecm_interface_find_gateway_ipv6(addr, gw_addr);
	}
#else
	if (!ECM_IP_ADDR_IS_V4(addr)) {
		return false;
	}
#endif
	return ecm_interface_find_gateway_ipv4(addr, gw_addr);
}
EXPORT_SYMBOL(ecm_interface_find_gateway);
/*
* ecm_interface_mac_addr_get_ipv4()
* Return mac for an IPv4 address
*/
static bool ecm_interface_mac_addr_get_ipv4(ip_addr_t addr, uint8_t *mac_addr, bool *on_link, ip_addr_t gw_addr)
{
	struct neighbour *neigh;
	struct ecm_interface_route ecm_rt;
	struct rtable *rt;
	struct dst_entry *dst;
	__be32 ipv4_addr;

	/*
	 * Get the MAC address that corresponds to IP address given.
	 * We look up the rtable entries and, from its neighbour structure, obtain the hardware address.
	 * This means we will also work if the neighbours are routers too.
	 * We also locate the MAC if the address is a local host address.
	 */
	ECM_IP_ADDR_TO_NIN4_ADDR(ipv4_addr, addr);
	if (!ecm_interface_find_route_by_addr(addr, &ecm_rt)) {
		*on_link = false;
		return false;
	}
	DEBUG_ASSERT(ecm_rt.v4_route, "Did not locate a v4 route!\n");
	DEBUG_TRACE("Found route\n");

	/*
	 * Is this destination on link or off-link via a gateway?
	 * When off-link, report the gateway address to the caller.
	 */
	rt = ecm_rt.rt.rtv4;
	if (rt->rt_uses_gateway || (rt->rt_flags & RTF_GATEWAY)) {
		*on_link = false;
		/* rt_gateway was renamed rt_gw4 in kernel 5.2 */
#if (LINUX_VERSION_CODE < KERNEL_VERSION(5, 2, 0))
		ECM_NIN4_ADDR_TO_IP_ADDR(gw_addr, rt->rt_gateway)
#else
		ECM_NIN4_ADDR_TO_IP_ADDR(gw_addr, rt->rt_gw4)
#endif
	} else {
		*on_link = true;
	}

	/*
	 * Get the neighbour entry for the address
	 */
	rcu_read_lock();
	dst = ecm_rt.dst;
	neigh = dst_neigh_lookup(dst, &ipv4_addr);
	if (!neigh) {
		/* Fall back to a direct ARP table lookup on the route's device */
		neigh = neigh_lookup(&arp_tbl, &ipv4_addr, dst->dev);
	}
	if (!neigh) {
		rcu_read_unlock();
		ecm_interface_route_release(&ecm_rt);
		DEBUG_WARN("no neigh\n");
		return false;
	}
	/* Only a NUD_VALID neighbour has a usable hardware address */
	if (!(neigh->nud_state & NUD_VALID)) {
		rcu_read_unlock();
		neigh_release(neigh);
		ecm_interface_route_release(&ecm_rt);
		DEBUG_WARN("neigh nud state is not valid\n");
		return false;
	}
	if (!neigh->dev) {
		rcu_read_unlock();
		neigh_release(neigh);
		ecm_interface_route_release(&ecm_rt);
		DEBUG_WARN("neigh has no device\n");
		return false;
	}

	/*
	 * If the device is loopback this will be because the address is a local address
	 * In this case locate the device that has this local address and get its mac.
	 */
	if (neigh->dev->type == ARPHRD_LOOPBACK) {
		struct net_device *dev;

		DEBUG_TRACE("%pI4 finds loopback device, dev: %px (%s)\n", &ipv4_addr, neigh->dev, neigh->dev->name);
		/* Release everything before the separate local-device lookup */
		rcu_read_unlock();
		neigh_release(neigh);
		ecm_interface_route_release(&ecm_rt);

		/*
		 * Lookup the device that has this IP address assigned
		 */
		dev = ip_dev_find(&init_net, ipv4_addr);
		if (!dev) {
			DEBUG_WARN("Unable to locate dev for: %pI4\n", &ipv4_addr);
			return false;
		}
		memcpy(mac_addr, dev->dev_addr, (size_t)dev->addr_len);
		DEBUG_TRACE("is local addr: %pI4, mac: %pM, dev ifindex: %d, dev: %px (%s), dev_type: %d\n",
			&ipv4_addr, mac_addr, dev->ifindex, dev, dev->name, dev->type);
		dev_put(dev);
		return true;
	}

	/*
	 * Non-ARP devices (e.g. point-to-point) have no hardware address;
	 * report an all-zero MAC in that case.
	 */
	if (!(neigh->dev->flags & IFF_NOARP)) {
		ether_addr_copy(mac_addr, neigh->ha);
	} else {
		DEBUG_TRACE("non-arp device: %px (%s, type: %d) to reach %pI4\n", neigh->dev, neigh->dev->name, neigh->dev->type, &ipv4_addr);
		eth_zero_addr(mac_addr);
	}
	DEBUG_TRACE("addr: %pI4, mac: %pM, iif: %d, neigh dev ifindex: %d, dev: %px (%s), dev_type: %d\n",
			&ipv4_addr, mac_addr, rt->rt_iif, neigh->dev->ifindex, neigh->dev, neigh->dev->name, neigh->dev->type);
	rcu_read_unlock();
	neigh_release(neigh);
	ecm_interface_route_release(&ecm_rt);
	return true;
}
/*
* ecm_interface_mac_addr_get()
* Return the mac address for the given IP address. Returns false on failure.
*/
bool ecm_interface_mac_addr_get(ip_addr_t addr, uint8_t *mac_addr, bool *on_link, ip_addr_t gw_addr)
{
	/*
	 * Dispatch on address family; IPv6 resolution requires IPv6 support
	 * to be compiled in.
	 */
#ifdef ECM_IPV6_ENABLE
	if (!ECM_IP_ADDR_IS_V4(addr)) {
		return ecm_interface_mac_addr_get_ipv6(addr, mac_addr, on_link, gw_addr);
	}
#else
	if (!ECM_IP_ADDR_IS_V4(addr)) {
		return false;
	}
#endif
	return ecm_interface_mac_addr_get_ipv4(addr, mac_addr, on_link, gw_addr);
}
EXPORT_SYMBOL(ecm_interface_mac_addr_get);
#ifdef ECM_IPV6_ENABLE
/*
* ecm_interface_mac_addr_get_ipv6_no_route()
* Finds the mac address of a node from its ip address reachable via
* the given device. It looks up the mac address in the neighbour entries.
* It doesn't do any route lookup to find the dst entry.
*/
static bool ecm_interface_mac_addr_get_ipv6_no_route(struct net_device *dev, ip_addr_t addr, uint8_t *mac_addr)
{
	struct in6_addr daddr;
	struct neighbour *neigh;
	struct net_device *local_dev;

	/* Default to an all-zero MAC for the non-failure special cases below */
	memset(mac_addr, 0, ETH_ALEN);

	/*
	 * Get the MAC address that corresponds to IP address given.
	 */
	ECM_IP_ADDR_TO_NIN6_ADDR(daddr, addr);
	local_dev = ipv6_dev_find(&init_net, &daddr, 1);
	if (local_dev) {
		DEBUG_TRACE("%pi6 is a local address\n", &daddr);
		/*
		 * NOTE(review): copies the MAC of the caller-supplied dev, not
		 * local_dev — assumes the caller passed the owning device; confirm.
		 */
		memcpy(mac_addr, dev->dev_addr, ETH_ALEN);
		dev_put(local_dev);
		return true;
	}

	rcu_read_lock();
	neigh = neigh_lookup(&nd_tbl, &daddr, dev);
	if (!neigh) {
		rcu_read_unlock();
		DEBUG_WARN("No neigh reference\n");
		return false;
	}
	/* Only a NUD_VALID neighbour has a usable hardware address */
	if (!(neigh->nud_state & NUD_VALID)) {
		neigh_release(neigh);
		rcu_read_unlock();
		DEBUG_WARN("NUD invalid\n");
		return false;
	}
	if (!neigh->dev) {
		neigh_release(neigh);
		rcu_read_unlock();
		DEBUG_WARN("Neigh dev invalid\n");
		return false;
	}
	/* Non-ARP devices have no hardware address; report zero MAC as success */
	if (neigh->dev->flags & IFF_NOARP) {
		neigh_release(neigh);
		rcu_read_unlock();
		DEBUG_TRACE("dest MAC is zero: %pM\n", mac_addr);
		return true;
	}
	memcpy(mac_addr, neigh->ha, (size_t)neigh->dev->addr_len);
	neigh_release(neigh);
	rcu_read_unlock();
	DEBUG_TRACE(ECM_IP_ADDR_OCTAL_FMT " maps to %pM\n", ECM_IP_ADDR_TO_OCTAL(addr), mac_addr);
	return true;
}
#endif
/*
* ecm_interface_mac_addr_get_ipv4_no_route()
* Finds the mac address of a node from its ip address reachable via
* the given device. It looks up the mac address in the neighbour entries.
* It doesn't do any route lookup to find the dst entry.
*/
static bool ecm_interface_mac_addr_get_ipv4_no_route(struct net_device *dev, ip_addr_t ip_addr, uint8_t *mac_addr)
{
	struct neighbour *neigh;
	__be32 be_addr;
	struct net_device *local_dev;

	/* Default to an all-zero MAC for the non-failure special cases below */
	memset(mac_addr, 0, ETH_ALEN);

	ECM_IP_ADDR_TO_NIN4_ADDR(be_addr, ip_addr);
	local_dev = ip_dev_find(&init_net, be_addr);
	if (local_dev) {
		DEBUG_TRACE("%pI4n is a local address\n", &be_addr);
		/*
		 * NOTE(review): copies the MAC of the caller-supplied dev, not
		 * local_dev — assumes the caller passed the owning device; confirm.
		 */
		memcpy(mac_addr, dev->dev_addr, ETH_ALEN);
		dev_put(local_dev);
		return true;
	}

	rcu_read_lock();
	neigh = neigh_lookup(&arp_tbl, &be_addr, dev);
	if (!neigh) {
		rcu_read_unlock();
		DEBUG_WARN("no neigh\n");
		return false;
	}
	/* Only a NUD_VALID neighbour has a usable hardware address */
	if (!(neigh->nud_state & NUD_VALID)) {
		neigh_release(neigh);
		rcu_read_unlock();
		DEBUG_WARN("neigh nud state is not valid\n");
		return false;
	}
	if (!neigh->dev) {
		neigh_release(neigh);
		rcu_read_unlock();
		DEBUG_WARN("neigh has no device\n");
		return false;
	}
	/* Non-ARP devices have no hardware address; report zero MAC as success */
	if (neigh->dev->flags & IFF_NOARP) {
		neigh_release(neigh);
		rcu_read_unlock();
		DEBUG_TRACE("dest MAC is zero: %pM\n", mac_addr);
		return true;
	}
	memcpy(mac_addr, neigh->ha, (size_t)neigh->dev->addr_len);
	neigh_release(neigh);
	rcu_read_unlock();
	DEBUG_TRACE("dest MAC: %pM\n", mac_addr);
	return true;
}
/*
* ecm_interface_mac_addr_get_no_route()
 * Return the mac address for the given IP address reachable via the given device.
* Return false on failure, true on success.
*/
bool ecm_interface_mac_addr_get_no_route(struct net_device *dev, ip_addr_t addr, uint8_t *mac_addr)
{
	/*
	 * Dispatch on address family; IPv6 neighbour lookup is only available
	 * when IPv6 support is compiled in.
	 */
#ifdef ECM_IPV6_ENABLE
	if (!ECM_IP_ADDR_IS_V4(addr)) {
		return ecm_interface_mac_addr_get_ipv6_no_route(dev, addr, mac_addr);
	}
#else
	if (!ECM_IP_ADDR_IS_V4(addr)) {
		return false;
	}
#endif
	return ecm_interface_mac_addr_get_ipv4_no_route(dev, addr, mac_addr);
}
EXPORT_SYMBOL(ecm_interface_mac_addr_get_no_route);
#ifdef ECM_MULTICAST_ENABLE
/*
* ecm_interface_multicast_dest_list_find_if()
* Searches for a given device in a list of interface indices
*
* dev Pointer to the net device to search for
* max_if Number of valid interfaces in the destination interface list
* dest_if_list The destination interface list
*/
static bool ecm_interface_multicast_dest_list_find_if(struct net_device *dev, uint8_t max_if, uint32_t *dest_if_list)
{
	int idx;

	/*
	 * Resolve each interface index in the destination list to a device
	 * and compare it with the one we are looking for.
	 */
	for (idx = 0; idx < max_if; idx++) {
		uint32_t *ifindex_p;
		struct net_device *candidate;
		bool match;

		ifindex_p = ecm_db_multicast_if_first_get_at_index(dest_if_list, idx);
		candidate = dev_get_by_index(&init_net, *ifindex_p);
		if (!candidate) {
			DEBUG_WARN("Found invalid if_index %d at index %d\n", *ifindex_p, idx);
			continue;
		}

		match = (candidate == dev);
		dev_put(candidate);
		if (match) {
			return true;
		}
	}

	return false;
}
/*
* ecm_interface_multicast_check_for_br_dev()
* Find a bridge dev is present or not in an
* array of Ifindexs
*/
bool ecm_interface_multicast_check_for_br_dev(uint32_t dest_if[], uint8_t max_if)
{
	struct net_device *br_dev;
	int i;

	for (i = 0; i < max_if; i++) {
		br_dev = dev_get_by_index(&init_net, dest_if[i]);
		if (!br_dev) {
			/*
			 * Interface got deleted; but is yet to be updated in MFC table
			 */
			DEBUG_WARN("Could not find a valid netdev here\n");
			continue;
		}
		/*
		 * A Linux bridge counts; an OVS bridge also counts when OVS
		 * support is compiled in.
		 */
		if (ecm_front_end_is_bridge_device(br_dev)
#ifdef ECM_INTERFACE_OVS_BRIDGE_ENABLE
			|| ecm_front_end_is_ovs_bridge_device(br_dev)
#endif
			) {
			dev_put(br_dev);
			return true;
		}
		dev_put(br_dev);
	}
	return false;
}
EXPORT_SYMBOL(ecm_interface_multicast_check_for_br_dev);
#ifdef ECM_INTERFACE_OVS_BRIDGE_ENABLE
/*
* ecm_interface_multicast_check_for_ovs_br_dev()
* Check if OVS bridge dev exists in given list of interfaces.
*/
bool ecm_interface_multicast_check_for_ovs_br_dev(uint32_t dest_if[], uint8_t max_if)
{
	int idx;

	/*
	 * Resolve each interface index and test whether it is an OVS bridge.
	 */
	for (idx = 0; idx < max_if; idx++) {
		struct net_device *candidate;
		bool is_ovs;

		candidate = dev_get_by_index(&init_net, dest_if[idx]);
		if (!candidate) {
			/*
			 * Interface got deleted; but is yet to be updated in MFC table
			 */
			DEBUG_WARN("Could not find a valid netdev for interface: %d\n", dest_if[idx]);
			continue;
		}

		is_ovs = ecm_front_end_is_ovs_bridge_device(candidate);
		dev_put(candidate);
		if (is_ovs) {
			return true;
		}
	}

	return false;
}
#endif
/*
* ecm_interface_multicast_is_iface_type()
* Checks if interface of type exist in mc_if_index
*/
bool ecm_interface_multicast_is_iface_type(int32_t mc_if_index[], int32_t max_if_index, unsigned short type)
{
	int32_t idx;

	/*
	 * A zero entry terminates the valid portion of the index list.
	 */
	for (idx = 0; idx < max_if_index; idx++) {
		struct net_device *ndev;

		if (!mc_if_index[idx]) {
			break;
		}

		ndev = dev_get_by_index(&init_net, mc_if_index[idx]);
		if (!ndev) {
			DEBUG_WARN("Could not find a valid interface with index = %d\n", mc_if_index[idx]);
			continue;
		}

		if (ndev->type == type) {
			DEBUG_TRACE("Interface dev = %s of type %u\n", ndev->name, type);
			dev_put(ndev);
			return true;
		}
		dev_put(ndev);
	}

	return false;
}
/*
* ecm_interface_multicast_filter_src_interface()
* Filter the source interface from the list.
*/
int32_t ecm_interface_multicast_filter_src_interface(struct ecm_db_connection_instance *ci, uint32_t *mc_dst_if_index)
{
	struct ecm_db_iface_instance *ii;
	struct ecm_db_iface_instance *from_ifaces[ECM_DB_IFACE_HEIRARCHY_MAX];
	ecm_db_iface_type_t ii_type;
	int32_t from_ifaces_first;
	int32_t from_iface_identifier;
	int32_t if_index = ECM_DB_IFACE_HEIRARCHY_MAX;

	/*
	 * Get the interface lists of the connection, we must have at least one interface in the list to continue
	 */
	from_ifaces_first = ecm_db_connection_interfaces_get_and_ref(ci, from_ifaces, ECM_DB_OBJ_DIR_FROM);
	if (from_ifaces_first == ECM_DB_IFACE_HEIRARCHY_MAX) {
		/* Empty hierarchy: return the sentinel value unchanged */
		return if_index;
	}

	/*
	 * Pick the last hierarchy entry; when it is a bridge (or OVS bridge),
	 * use the entry one level up instead.
	 */
	ii = from_ifaces[ECM_DB_IFACE_HEIRARCHY_MAX - 1];
	ii_type = ecm_db_iface_type_get(ii);
	if ((ii_type == ECM_DB_IFACE_TYPE_BRIDGE) || (ii_type == ECM_DB_IFACE_TYPE_OVS_BRIDGE)) {
		ii = from_ifaces[ECM_DB_IFACE_HEIRARCHY_MAX - 2];
	}
	from_iface_identifier = ecm_db_iface_interface_identifier_get(ii);

	/*
	 * Strip the source interface from the destination list; the return
	 * value is the number of remaining valid entries.
	 */
	if_index = ecm_interface_multicast_check_for_src_ifindex(mc_dst_if_index, ECM_DB_IFACE_HEIRARCHY_MAX, from_iface_identifier);
	ecm_db_connection_interfaces_deref(from_ifaces, from_ifaces_first);
	return if_index;
}
/*
 * ecm_interface_multicast_check_for_src_ifindex()
* Find if a source netdev ifindex is matching with list of
* multicast destination netdev ifindex. If find a match then
* returns a new list of destination netdev ifindex excluding
* the ifindex of source netdev.
*/
int32_t ecm_interface_multicast_check_for_src_ifindex(int32_t mc_if_index[], int32_t max_if_index, int32_t if_num)
{
	int32_t rd;
	int32_t wr = 0;

	/*
	 * Compact the list in place, dropping every entry equal to if_num.
	 * A zero entry terminates the valid portion of the list.
	 */
	for (rd = 0; rd < max_if_index; rd++) {
		if (!mc_if_index[rd]) {
			break;
		}
		if (mc_if_index[rd] == if_num) {
			continue;
		}
		mc_if_index[wr++] = mc_if_index[rd];
	}

	/* Number of valid entries remaining after the filter */
	return wr;
}
EXPORT_SYMBOL(ecm_interface_multicast_check_for_src_ifindex);
#endif
#ifdef ECM_INTERFACE_VXLAN_ENABLE
/*
* ecm_interface_vxlan_type_get()
* Function to get VxLAN interface type i.e. inner/outer.
* Returns 0 for outer and 1 for inner.
*/
uint32_t ecm_interface_vxlan_type_get(struct sk_buff *skb)
{
	ip_addr_t saddr;
	struct net_device *local_dev;

	if (!skb) {
		/* NOTE(review): returned as (uint32_t)-1 despite the unsigned return type; callers must treat it as the error sentinel */
		return -1;
	}

	/* Extract the source IP from the IPv4 or IPv6 header */
	switch (ntohs(skb->protocol)) {
	case ETH_P_IP:
		ECM_NIN4_ADDR_TO_IP_ADDR(saddr, ip_hdr(skb)->saddr);
		break;
	case ETH_P_IPV6:
		ECM_NIN6_ADDR_TO_IP_ADDR(saddr, ipv6_hdr(skb)->saddr);
		break;
	default:
		DEBUG_WARN("%px: Unknown skb protocol.\n", skb);
		return -1;
	}

	/*
	 * A locally-owned source address means the packet is post-encapsulation
	 * (outer); otherwise it is the pre-encapsulation (inner) flow.
	 */
	local_dev = ecm_interface_dev_find_by_local_addr(saddr);
	if (local_dev) {
		dev_put(local_dev);
		DEBUG_TRACE("%px: VxLAN outer interface type.\n", skb);
		return 0;
	}

	DEBUG_TRACE("%px: VxLAN inner interface type.\n", skb);
	return 1;
}
#endif
/*
 * ecm_interface_find_route_by_addr_ipv4()
 *	Fill in the route for the given IP address. Returns false on failure.
*/
static bool ecm_interface_find_route_by_addr_ipv4(ip_addr_t addr, struct ecm_interface_route *ecm_rt)
{
	__be32 be_addr;

	/*
	 * Get a route to the given IP address, this will allow us to also find the interface
	 * it is using to communicate with that IP address.
	 */
	ECM_IP_ADDR_TO_NIN4_ADDR(be_addr, addr);
	ecm_rt->rt.rtv4 = ip_route_output(&init_net, be_addr, 0, 0, 0);
	/* ip_route_output() returns ERR_PTR on failure, never NULL */
	if (IS_ERR(ecm_rt->rt.rtv4)) {
		DEBUG_TRACE("No output route to: %pI4n\n", &be_addr);
		return false;
	}
	DEBUG_TRACE("Output route to: %pI4n is: %px\n", &be_addr, ecm_rt->rt.rtv4);
	/* Caller must release via ecm_interface_route_release() */
	ecm_rt->dst = (struct dst_entry *)ecm_rt->rt.rtv4;
	ecm_rt->v4_route = true;
	return true;
}
#ifdef ECM_IPV6_ENABLE
/*
 * Version shim: rt6_lookup() gained an extra sk_buff parameter in
 * kernel 4.17; pass NULL for it on newer kernels.
 */
struct rt6_info *ecm_interface_ipv6_route_lookup(struct net *netf, struct in6_addr *addr)
{
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 17, 0))
	return rt6_lookup(netf, addr, NULL, 0, 0);
#else
	return rt6_lookup(netf, addr, NULL, 0, NULL, 0);
#endif
}
/*
 * ecm_interface_find_route_by_addr_ipv6()
 *	Fill in the route for the given IP address. Returns false on failure.
*/
static bool ecm_interface_find_route_by_addr_ipv6(ip_addr_t addr, struct ecm_interface_route *ecm_rt)
{
	struct in6_addr naddr;

	ECM_IP_ADDR_TO_NIN6_ADDR(naddr, addr);

	/*
	 * Get a route to the given IP address, this will allow us to also find the interface
	 * it is using to communicate with that IP address.
	 */
	ecm_rt->rt.rtv6 = ecm_interface_ipv6_route_lookup(&init_net, &naddr);
	if (!ecm_rt->rt.rtv6) {
		DEBUG_TRACE("No output route to: " ECM_IP_ADDR_OCTAL_FMT "\n", ECM_IP_ADDR_TO_OCTAL(addr));
		/*
		 * Fix: this bool-returning function previously returned NULL
		 * (a pointer constant); return false explicitly.
		 */
		return false;
	}
	DEBUG_TRACE("Output route to: " ECM_IP_ADDR_OCTAL_FMT " is: %px\n", ECM_IP_ADDR_TO_OCTAL(addr), ecm_rt->rt.rtv6);
	/* Caller must release via ecm_interface_route_release() */
	ecm_rt->dst = (struct dst_entry *)ecm_rt->rt.rtv6;
	ecm_rt->v4_route = false;
	return true;
}
#endif
/*
* ecm_interface_addr_find_route_by_addr()
* Return the route (in the given parameter) for the given IP address. Returns false on failure.
*
* Route is the device on which the addr is reachable, which may be loopback for local addresses.
*
* Returns true if the route was able to be located. The route must be released using ecm_interface_route_release().
*/
bool ecm_interface_find_route_by_addr(ip_addr_t addr, struct ecm_interface_route *ecm_rt)
{
	char __attribute__((unused)) addr_str[40];

	DEBUG_ECM_IP_ADDR_TO_STRING(addr_str, addr);
	DEBUG_TRACE("Locate route to: %s\n", addr_str);

	/*
	 * Dispatch on address family; IPv6 routing is only available when
	 * IPv6 support is compiled in.
	 */
#ifdef ECM_IPV6_ENABLE
	if (!ECM_IP_ADDR_IS_V4(addr)) {
		return ecm_interface_find_route_by_addr_ipv6(addr, ecm_rt);
	}
#else
	if (!ECM_IP_ADDR_IS_V4(addr)) {
		return false;
	}
#endif
	return ecm_interface_find_route_by_addr_ipv4(addr, ecm_rt);
}
EXPORT_SYMBOL(ecm_interface_find_route_by_addr);
/*
* ecm_interface_route_release()
* Release an ecm route
*/
void ecm_interface_route_release(struct ecm_interface_route *rt)
{
	/* Drops the dst reference taken by ecm_interface_find_route_by_addr() */
	dst_release(rt->dst);
}
EXPORT_SYMBOL(ecm_interface_route_release);
#ifdef ECM_IPV6_ENABLE
/*
* ecm_interface_send_neighbour_solicitation()
* Issue an IPv6 Neighbour soliciation request.
*/
void ecm_interface_send_neighbour_solicitation(struct net_device *dev, ip_addr_t addr)
{
	struct in6_addr dst_addr, src_addr;
	struct in6_addr mc_dst_addr;
	struct rt6_info *rt6i;
	struct neighbour *neigh;
	struct net *netf = dev_net(dev);
	int ret;

	/*
	 * Find source and destination addresses in Linux format. We need
	 * mcast destination address as well.
	 */
	ECM_IP_ADDR_TO_NIN6_ADDR(dst_addr, addr);
	addrconf_addr_solict_mult(&dst_addr, &mc_dst_addr);

	/*
	 * Fix: the result of the source-address selection was previously
	 * ignored; on failure src_addr is uninitialized and must not be used
	 * to build the NS, so bail out.
	 */
	ret = ipv6_dev_get_saddr(netf, dev, &mc_dst_addr, 0, &src_addr);
	if (ret) {
		DEBUG_TRACE("No source address selected for NS (err: %d)\n", ret);
		return;
	}

	/*
	 * Find the route entry
	 */
	rt6i = ecm_interface_ipv6_route_lookup(netf, &dst_addr);
	if (!rt6i) {
		DEBUG_TRACE("IPv6 Route lookup failure for destination IPv6 address " ECM_IP_ADDR_OCTAL_FMT "\n", ECM_IP_ADDR_TO_OCTAL(addr));
		return;
	}

	/*
	 * Find the neighbor entry
	 */
	neigh = rt6i->dst.ops->neigh_lookup(&rt6i->dst, NULL, &dst_addr);
	if (IS_ERR(neigh)) {
		DEBUG_TRACE("Neighbour lookup failure for destination IPv6 address " ECM_IP_ADDR_OCTAL_FMT "\n", ECM_IP_ADDR_TO_OCTAL(addr));
		dst_release(&rt6i->dst);
		return;
	}

	/*
	 * Issue a Neighbour soliciation request
	 * (ndisc_send_ns() gained an nonce parameter in kernel 4.10)
	 */
	DEBUG_TRACE("Issue Neighbour solicitation request\n");
#if (LINUX_VERSION_CODE < KERNEL_VERSION(4, 10, 0))
	ndisc_send_ns(dev, &dst_addr, &mc_dst_addr, &src_addr);
#else
	ndisc_send_ns(dev, &dst_addr, &mc_dst_addr, &src_addr, 0);
#endif
	neigh_release(neigh);
	dst_release(&rt6i->dst);
}
EXPORT_SYMBOL(ecm_interface_send_neighbour_solicitation);
#endif
/*
 * ecm_interface_send_arp_request()
 *	Issue and ARP request.
 */
void ecm_interface_send_arp_request(struct net_device *dest_dev, ip_addr_t dest_addr, bool on_link, ip_addr_t gw_addr)
{
	struct neighbour *n;
	__be32 nexthop;

	/*
	 * ARP for the gateway when the destination is not directly reachable
	 * and a gateway is known; otherwise ARP for the destination itself.
	 */
	if (!on_link && !ECM_IP_ADDR_IS_NULL(gw_addr)) {
		ECM_IP_ADDR_TO_NIN4_ADDR(nexthop, gw_addr);
	} else {
		ECM_IP_ADDR_TO_NIN4_ADDR(nexthop, dest_addr);
	}

	/*
	 * Make sure a neighbour entry exists before soliciting, so the ARP
	 * reply has an entry to update.
	 */
	n = neigh_lookup(&arp_tbl, &nexthop, dest_dev);
	if (!n) {
		n = neigh_create(&arp_tbl, &nexthop, dest_dev);
		if (IS_ERR(n)) {
			DEBUG_WARN("Unable to create ARP request neigh for %pI4\n", &nexthop);
			return;
		}
	}

	DEBUG_TRACE("Send ARP for %pI4\n", &nexthop);
	neigh_event_send(n, NULL);
	neigh_release(n);
}
EXPORT_SYMBOL(ecm_interface_send_arp_request);
/*
 * ecm_interface_ipv4_neigh_get()
 *	Returns neighbour reference for a given IP address which must be released when you are done with it.
 *
 * Returns NULL on fail.
 */
struct neighbour *ecm_interface_ipv4_neigh_get(ip_addr_t addr)
{
	struct neighbour *n;
	struct rtable *rt;
	__be32 ip4;

	ECM_IP_ADDR_TO_NIN4_ADDR(ip4, addr);

	/*
	 * Route to the address; the route's dst gives us the neighbour.
	 */
	rt = ip_route_output(&init_net, ip4, 0, 0, 0);
	if (IS_ERR(rt)) {
		return NULL;
	}

	n = dst_neigh_lookup((struct dst_entry *)rt, &ip4);
	ip_rt_put(rt);
	return n;
}
#ifdef ECM_IPV6_ENABLE
/*
 * ecm_interface_ipv6_neigh_get()
 *	Returns neighbour reference for a given IP address which must be released when you are done with it.
 *
 * Returns NULL on fail.
 */
struct neighbour *ecm_interface_ipv6_neigh_get(ip_addr_t addr)
{
	struct neighbour *n;
	struct rt6_info *rt6;
	struct in6_addr ip6;

	ECM_IP_ADDR_TO_NIN6_ADDR(ip6, addr);

	/*
	 * Route to the address; the route's dst gives us the neighbour.
	 */
	rt6 = ecm_interface_ipv6_route_lookup(&init_net, &ip6);
	if (!rt6) {
		return NULL;
	}

	n = dst_neigh_lookup((struct dst_entry *)rt6, &ip6);
	dst_release((struct dst_entry *)rt6);
	return n;
}
#endif
/*
 * ecm_interface_is_pptp()
 *	skip pptp tunnel encapsulated traffic
 *
 * ECM does not handle PPTP,
 * this function detects packets of that type so they can be skipped over to improve their throughput.
 */
bool ecm_interface_is_pptp(struct sk_buff *skb, const struct net_device *out)
{
	struct net_device *in_dev;
	bool pptp = false;

	/*
	 * Egress device is a PPTP PPP channel?
	 */
	if ((out->type == ARPHRD_PPP) && (out->priv_flags_ext & IFF_EXT_PPP_PPTP)) {
		return true;
	}

	/*
	 * Check the ingress device as well; treat an unresolvable ingress
	 * index as "skip" too.
	 */
	in_dev = dev_get_by_index(&init_net, skb->skb_iif);
	if (!in_dev) {
		return true;
	}

	if ((in_dev->type == ARPHRD_PPP) && (in_dev->priv_flags_ext & IFF_EXT_PPP_PPTP)) {
		pptp = true;
	}

	dev_put(in_dev);
	return pptp;
}
/*
 * ecm_interface_is_l2tp_packet_by_version()
 *	Check version of l2tp tunnel encapsulated traffic
 *
 * ECM does not handle l2tp,
 * this function detects packets of that type so they can be skipped over to improve their throughput.
 */
bool ecm_interface_is_l2tp_packet_by_version(struct sk_buff *skb, const struct net_device *out, int ver)
{
	struct net_device *in_dev;
	uint32_t flag;
	bool match = false;

	/*
	 * Map the requested L2TP version onto the corresponding PPP channel
	 * flag; unknown versions get a zero mask and can never match.
	 */
	flag = (ver == 2) ? IFF_EXT_PPP_L2TPV2 : ((ver == 3) ? IFF_EXT_PPP_L2TPV3 : 0);

	/*
	 * skip first pass of l2tp/pptp tunnel encapsulated traffic
	 */
	if (out->priv_flags_ext & flag) {
		return true;
	}

	in_dev = dev_get_by_index(&init_net, skb->skb_iif);
	if (!in_dev) {
		return true;
	}

	if (in_dev->priv_flags_ext & flag) {
		match = true;
	}

	dev_put(in_dev);
	return match;
}
/*
* ecm_interface_is_l2tp_pptp()
* skip l2tp/pptp tunnel encapsulated traffic
*
* ECM does not handle L2TP or PPTP encapsulated packets,
* this function detects packets of that type so they can be skipped over to improve their throughput.
*/
bool ecm_interface_is_l2tp_pptp(struct sk_buff *skb, const struct net_device *out)
{
struct net_device *in;
/*
* skip first pass of l2tp/pptp tunnel encapsulated traffic
*/
if (out->priv_flags_ext & (IFF_EXT_PPP_L2TPV2 | IFF_EXT_PPP_L2TPV3 |
IFF_EXT_PPP_PPTP)) {
return true;
}
in = dev_get_by_index(&init_net, skb->skb_iif);
if (!in) {
return true;
}
if (in->priv_flags_ext & (IFF_EXT_PPP_L2TPV2 | IFF_EXT_PPP_L2TPV3 |
IFF_EXT_PPP_PPTP)) {
dev_put(in);
return true;
}
dev_put(in);
return false;
}
#ifdef ECM_INTERFACE_VLAN_ENABLE
/*
 * ecm_interface_vlan_interface_establish()
 *	Returns a reference to a iface of the VLAN type, possibly creating one if necessary.
 * Returns NULL on failure or a reference to interface.
 */
static struct ecm_db_iface_instance *ecm_interface_vlan_interface_establish(struct ecm_db_interface_info_vlan *type_info,
							char *dev_name, int32_t dev_interface_num, int32_t ae_interface_num, int32_t mtu)
{
	struct ecm_db_iface_instance *existing;
	struct ecm_db_iface_instance *created;

	DEBUG_INFO("Establish VLAN iface: %s with address: %pM, vlan tag: %u, vlan_tpid: %x MTU: %d, if num: %d, accel engine if id: %d\n",
			dev_name, type_info->address, type_info->vlan_tag, type_info->vlan_tpid, mtu, dev_interface_num, ae_interface_num);

	/*
	 * Fast path: the iface may already be in the database.
	 */
	existing = ecm_db_iface_find_and_ref_vlan(type_info->address, type_info->vlan_tag, type_info->vlan_tpid);
	if (existing) {
		DEBUG_TRACE("%px: iface established\n", existing);
		return existing;
	}

	/*
	 * Allocate a fresh iface outside the lock.
	 */
	created = ecm_db_iface_alloc();
	if (!created) {
		DEBUG_WARN("Failed to establish iface\n");
		return NULL;
	}

	/*
	 * Re-check under the lock so two racing callers cannot both insert;
	 * the loser drops its unused allocation.
	 */
	spin_lock_bh(&ecm_interface_lock);
	existing = ecm_db_iface_find_and_ref_vlan(type_info->address, type_info->vlan_tag, type_info->vlan_tpid);
	if (existing) {
		spin_unlock_bh(&ecm_interface_lock);
		ecm_db_iface_deref(created);
		return existing;
	}

	ecm_db_iface_add_vlan(created, type_info->address, type_info->vlan_tag, type_info->vlan_tpid, dev_name,
			mtu, dev_interface_num, ae_interface_num, NULL, created);
	spin_unlock_bh(&ecm_interface_lock);

	DEBUG_TRACE("%px: vlan iface established\n", created);
	return created;
}
#endif
#ifdef ECM_INTERFACE_MACVLAN_ENABLE
/*
 * ecm_interface_macvlan_interface_establish()
 *	Returns a reference to a iface of the MACVLAN type, possibly creating one if necessary.
 * Returns NULL on failure or a reference to interface.
 *
 * The returned instance holds a reference taken by the find/add path; the
 * caller is expected to release it with ecm_db_iface_deref().
 */
static struct ecm_db_iface_instance *ecm_interface_macvlan_interface_establish(struct ecm_db_interface_info_macvlan *type_info,
							char *dev_name, int32_t dev_interface_num, int32_t ae_interface_num, int32_t mtu)
{
	struct ecm_db_iface_instance *nii;
	struct ecm_db_iface_instance *ii;
	DEBUG_INFO("Establish MACVLAN iface: %s with address: %pM, MTU: %d, if num: %d, accel engine if id: %d\n",
			dev_name, type_info->address, mtu, dev_interface_num, ae_interface_num);
	/*
	 * Locate the iface (fast path, no lock needed)
	 */
	ii = ecm_db_iface_find_and_ref_macvlan(type_info->address);
	if (ii) {
		DEBUG_TRACE("%px: iface established\n", ii);
		return ii;
	}
	/*
	 * No iface - create one (allocation done outside the lock)
	 */
	nii = ecm_db_iface_alloc();
	if (!nii) {
		DEBUG_WARN("Failed to establish iface\n");
		return NULL;
	}
	/*
	 * Add iface into the database, atomically to avoid races creating the same thing.
	 * Re-check under the lock: if another thread inserted first, drop our
	 * unused allocation and return the winner's instance.
	 */
	spin_lock_bh(&ecm_interface_lock);
	ii = ecm_db_iface_find_and_ref_macvlan(type_info->address);
	if (ii) {
		spin_unlock_bh(&ecm_interface_lock);
		ecm_db_iface_deref(nii);
		return ii;
	}
	ecm_db_iface_add_macvlan(nii, type_info->address, dev_name,
			mtu, dev_interface_num, ae_interface_num, NULL, nii);
	spin_unlock_bh(&ecm_interface_lock);
	DEBUG_TRACE("%px: MACVLAN iface established\n", nii);
	return nii;
}
#endif
#if defined(ECM_INTERFACE_OVS_BRIDGE_ENABLE) && defined(ECM_MULTICAST_ENABLE)
/*
 * ecm_interface_multicast_ovs_to_interface_get_and_ref()
 *	Populate ov_ ports/bridge device from multicast 'to' list.
 * Returns the number of ovs port count.
 *
 * NOTE(review): each entry written to to_ovs_port[]/to_ovs_brdev[] holds a
 * device reference (dev_get_by_index()/ovsmgr_dev_get_master()); the caller
 * is expected to release them. The arrays must be sized for up to
 * ECM_DB_MULTICAST_IF_MAX entries (at most one port per 'to' hierarchy).
 */
int ecm_interface_multicast_ovs_to_interface_get_and_ref(struct ecm_db_connection_instance *ci, struct net_device **to_ovs_port,
							struct net_device **to_ovs_brdev)
{
	struct net_device *dev;
	struct ecm_db_iface_instance *to_mc_ifaces;
	int32_t *to_mc_ifaces_first, *to_iface_first, if_cnt, i;
	int ovs_port_cnt = 0;
	if_cnt = ecm_db_multicast_connection_to_interfaces_get_and_ref_all(ci, &to_mc_ifaces, &to_mc_ifaces_first);
	if (!if_cnt) {
		DEBUG_WARN("%px: Not able to find 'to' interfaces", ci);
		return 0;
	}
	/*
	 * The 'to' interfaces ports can be part of different OVS bridges.
	 * ovs-br1-(eth0, eth1, eth2)
	 * ovs-br2-(eth3)
	 * ovs-br3 (eth4, eth5)
	 *
	 * to_ovs_port: eth0, eth1, eth2, eth3. eth4. eth5
	 * to_ovs_brdev: ovs-br1, ovs-br1, ovs-br1, ovs-br2, ovs-br3, ovs-br3
	 */
	for (i = 0; i < ECM_DB_MULTICAST_IF_MAX; i++) {
		struct ecm_db_iface_instance *ii_temp;
		int32_t j;
		/*
		 * Find interface list, skip if invalid (an empty hierarchy is
		 * marked with ECM_DB_IFACE_HEIRARCHY_MAX as its first index).
		 */
		to_iface_first = ecm_db_multicast_if_first_get_at_index(to_mc_ifaces_first, i);
		if (*to_iface_first == ECM_DB_IFACE_HEIRARCHY_MAX) {
			continue;
		}
		/*
		 * We need to find 'to' multicast port to
		 * update the OVS statistics. Walk the hierarchy from the
		 * innermost entry outwards, stopping at the first OVS port.
		 */
		ii_temp = ecm_db_multicast_if_heirarchy_get(to_mc_ifaces, i);
		for (j = ECM_DB_IFACE_HEIRARCHY_MAX - 1; j >= *to_iface_first; j--) {
			struct net_device *br_dev;
			struct ecm_db_iface_instance **ifaces;
			struct ecm_db_iface_instance *to_iface;
			struct ecm_db_iface_instance *ii_single;
			ii_single = ecm_db_multicast_if_instance_get_at_index(ii_temp, j);
			ifaces = (struct ecm_db_iface_instance **)ii_single;
			to_iface = *ifaces;
			dev = dev_get_by_index(&init_net, ecm_db_iface_interface_identifier_get(to_iface));
			if (unlikely(!dev)) {
				DEBUG_WARN("%px: Failed to get net device with %d index\n", ci, j);
				continue;
			}
			if (!ecm_interface_is_ovs_bridge_port(dev)) {
				DEBUG_TRACE("%px: %s_dev: %s at %d index is not an OVS bridge port\n", ci, ecm_db_obj_dir_strings[ECM_DB_OBJ_DIR_TO], dev->name, j);
				dev_put(dev);
				continue;
			}
			br_dev = ovsmgr_dev_get_master(dev);
			DEBUG_ASSERT(br_dev, "%px: master dev for the OVS port:%s is NULL\n", ci, dev->name);
			/*
			 * Record the port and its master bridge; references
			 * are transferred to the output arrays.
			 */
			to_ovs_port[ovs_port_cnt] = dev;
			to_ovs_brdev[ovs_port_cnt] = br_dev;
			DEBUG_TRACE("%px: %s_dev: %s at %d index is an OVS bridge port. OVS bridge: %s\n", ci, ecm_db_obj_dir_strings[ECM_DB_OBJ_DIR_TO], dev->name, j, br_dev->name);
			ovs_port_cnt++;
			break;
		}
	}
	ecm_db_multicast_connection_to_interfaces_deref_all(to_mc_ifaces, to_mc_ifaces_first);
	return ovs_port_cnt;
}
#endif
#ifdef ECM_INTERFACE_OVS_BRIDGE_ENABLE
/*
 * ecm_interface_is_ovs_bridge_port()
 *	Returns true if dev is OpenVswitch (OVS) bridge port.
 */
bool ecm_interface_is_ovs_bridge_port(const struct net_device *dev)
{
	/*
	 * OVS datapath ports are flagged with IFF_OVS_DATAPATH.
	 */
	return (dev->priv_flags & IFF_OVS_DATAPATH) != 0;
}
#endif
/*
 * ecm_interface_bridge_interface_establish()
 *	Returns a reference to a iface of the BRIDGE type, possibly creating one if necessary.
 * Returns NULL on failure or a reference to interface.
 *
 * The returned instance holds a reference taken by the find/add path; the
 * caller is expected to release it with ecm_db_iface_deref().
 */
static struct ecm_db_iface_instance *ecm_interface_bridge_interface_establish(struct ecm_db_interface_info_bridge *type_info,
							char *dev_name, int32_t dev_interface_num, int32_t ae_interface_num, int32_t mtu)
{
	struct ecm_db_iface_instance *nii;
	struct ecm_db_iface_instance *ii;
	DEBUG_INFO("Establish BRIDGE iface: %s with address: %pM, MTU: %d, if num: %d, accel engine if id: %d\n",
			dev_name, type_info->address, mtu, dev_interface_num, ae_interface_num);
	/*
	 * Locate the iface (fast path, no lock needed)
	 */
	ii = ecm_db_iface_find_and_ref_bridge(type_info->address, dev_interface_num);
	if (ii) {
		DEBUG_TRACE("%px: iface established\n", ii);
		return ii;
	}
	/*
	 * No iface - create one (allocation done outside the lock)
	 */
	nii = ecm_db_iface_alloc();
	if (!nii) {
		DEBUG_WARN("Failed to establish iface\n");
		return NULL;
	}
	/*
	 * Add iface into the database, atomically to avoid races creating the same thing.
	 * Re-check under the lock: if another thread inserted first, drop our
	 * unused allocation and return the winner's instance.
	 */
	spin_lock_bh(&ecm_interface_lock);
	ii = ecm_db_iface_find_and_ref_bridge(type_info->address, dev_interface_num);
	if (ii) {
		spin_unlock_bh(&ecm_interface_lock);
		ecm_db_iface_deref(nii);
		return ii;
	}
	ecm_db_iface_add_bridge(nii, type_info->address, dev_name,
			mtu, dev_interface_num, ae_interface_num, NULL, nii);
	spin_unlock_bh(&ecm_interface_lock);
	DEBUG_TRACE("%px: bridge iface established\n", nii);
	return nii;
}
#ifdef ECM_INTERFACE_OVS_BRIDGE_ENABLE
/*
 * ecm_interface_ovs_bridge_interface_establish()
 *	Returns a reference to a iface of the OVS BRIDGE type, possibly creating one if necessary.
 * Returns NULL on failure or a reference to interface.
 *
 * The returned instance holds a reference taken by the find/add path; the
 * caller is expected to release it with ecm_db_iface_deref().
 */
static struct ecm_db_iface_instance *ecm_interface_ovs_bridge_interface_establish(struct ecm_db_interface_info_ovs_bridge *type_info,
							char *dev_name, int32_t dev_interface_num, int32_t ae_interface_num, int32_t mtu)
{
	struct ecm_db_iface_instance *nii;
	struct ecm_db_iface_instance *ii;
	DEBUG_INFO("Establish OVS BRIDGE iface: %s with address: %pM, MTU: %d, if num: %d, accel engine if id: %d\n",
			dev_name, type_info->address, mtu, dev_interface_num, ae_interface_num);
	/*
	 * Locate the iface (fast path, no lock needed)
	 */
	ii = ecm_db_iface_find_and_ref_ovs_bridge(type_info->address, dev_interface_num);
	if (ii) {
		DEBUG_TRACE("%px: iface established\n", ii);
		return ii;
	}
	/*
	 * No iface - create one (allocation done outside the lock)
	 */
	nii = ecm_db_iface_alloc();
	if (!nii) {
		DEBUG_WARN("Failed to establish iface\n");
		return NULL;
	}
	/*
	 * Add iface into the database, atomically to avoid races creating the same thing.
	 * Re-check under the lock: if another thread inserted first, drop our
	 * unused allocation and return the winner's instance.
	 */
	spin_lock_bh(&ecm_interface_lock);
	ii = ecm_db_iface_find_and_ref_ovs_bridge(type_info->address, dev_interface_num);
	if (ii) {
		spin_unlock_bh(&ecm_interface_lock);
		ecm_db_iface_deref(nii);
		return ii;
	}
	ecm_db_iface_add_ovs_bridge(nii, type_info->address, dev_name,
			mtu, dev_interface_num, ae_interface_num, NULL, nii);
	spin_unlock_bh(&ecm_interface_lock);
	DEBUG_TRACE("%px: OVS bridge iface established\n", nii);
	return nii;
}
#endif
#ifdef ECM_INTERFACE_BOND_ENABLE
/*
 * ecm_interface_lag_interface_establish()
 *	Returns a reference to a iface of the LAG type, possibly creating one if necessary.
 * Returns NULL on failure or a reference to interface.
 *
 * The returned instance holds a reference taken by the find/add path; the
 * caller is expected to release it with ecm_db_iface_deref().
 */
static struct ecm_db_iface_instance *ecm_interface_lag_interface_establish(struct ecm_db_interface_info_lag *type_info,
							char *dev_name, int32_t dev_interface_num, int32_t ae_interface_num, int32_t mtu)
{
	struct ecm_db_iface_instance *nii;
	struct ecm_db_iface_instance *ii;
	DEBUG_INFO("Establish LAG iface: %s with address: %pM, MTU: %d, if num: %d, accel engine if id: %d\n",
			dev_name, type_info->address, mtu, dev_interface_num, ae_interface_num);
	/*
	 * Locate the iface (fast path, no lock needed)
	 */
	ii = ecm_db_iface_find_and_ref_lag(type_info->address);
	if (ii) {
		DEBUG_TRACE("%px: iface established\n", ii);
		return ii;
	}
	/*
	 * No iface - create one (allocation done outside the lock)
	 */
	nii = ecm_db_iface_alloc();
	if (!nii) {
		DEBUG_WARN("Failed to establish iface\n");
		return NULL;
	}
	/*
	 * Add iface into the database, atomically to avoid races creating the same thing.
	 * Re-check under the lock: if another thread inserted first, drop our
	 * unused allocation and return the winner's instance.
	 */
	spin_lock_bh(&ecm_interface_lock);
	ii = ecm_db_iface_find_and_ref_lag(type_info->address);
	if (ii) {
		spin_unlock_bh(&ecm_interface_lock);
		ecm_db_iface_deref(nii);
		return ii;
	}
	ecm_db_iface_add_lag(nii, type_info->address, dev_name,
			mtu, dev_interface_num, ae_interface_num, NULL, nii);
	spin_unlock_bh(&ecm_interface_lock);
	DEBUG_TRACE("%px: lag iface established\n", nii);
	return nii;
}
#endif
/*
 * ecm_interface_ethernet_interface_establish()
 *	Returns a reference to a iface of the ETHERNET type, possibly creating one if necessary.
 * Returns NULL on failure or a reference to interface.
 */
static struct ecm_db_iface_instance *ecm_interface_ethernet_interface_establish(struct ecm_db_interface_info_ethernet *type_info,
							char *dev_name, int32_t dev_interface_num, int32_t ae_interface_num, int32_t mtu)
{
	struct ecm_db_iface_instance *existing;
	struct ecm_db_iface_instance *created;

	DEBUG_INFO("Establish ETHERNET iface: %s with address: %pM, MTU: %d, if num: %d, accel engine if id: %d\n",
			dev_name, type_info->address, mtu, dev_interface_num, ae_interface_num);

	/*
	 * Fast path: the iface may already be in the database.
	 */
	existing = ecm_db_iface_ifidx_find_and_ref_ethernet(type_info->address, dev_interface_num);
	if (existing) {
		DEBUG_TRACE("%px: iface established\n", existing);

		/*
		 * Update the accel engine interface identifier, just in case it was changed.
		 */
		ecm_db_iface_ae_interface_identifier_set(existing, ae_interface_num);
		return existing;
	}

	/*
	 * Allocate a fresh iface outside the lock.
	 */
	created = ecm_db_iface_alloc();
	if (!created) {
		DEBUG_WARN("Failed to establish iface\n");
		return NULL;
	}

	/*
	 * Re-check under the lock so two racing callers cannot both insert;
	 * the loser drops its unused allocation.
	 */
	spin_lock_bh(&ecm_interface_lock);
	existing = ecm_db_iface_ifidx_find_and_ref_ethernet(type_info->address, dev_interface_num);
	if (existing) {
		spin_unlock_bh(&ecm_interface_lock);
		ecm_db_iface_deref(created);
		return existing;
	}

	ecm_db_iface_add_ethernet(created, type_info->address, dev_name,
			mtu, dev_interface_num, ae_interface_num, NULL, created);
	spin_unlock_bh(&ecm_interface_lock);

	DEBUG_TRACE("%px: ethernet iface established\n", created);
	return created;
}
#ifdef ECM_INTERFACE_PPPOE_ENABLE
/*
 * ecm_interface_pppoe_interface_establish()
 *	Returns a reference to a iface of the PPPoE type, possibly creating one if necessary.
 * Returns NULL on failure or a reference to interface.
 *
 * The returned instance holds a reference taken by the find/add path; the
 * caller is expected to release it with ecm_db_iface_deref().
 */
static struct ecm_db_iface_instance *ecm_interface_pppoe_interface_establish(struct ecm_db_interface_info_pppoe *type_info,
							char *dev_name, int32_t dev_interface_num, int32_t ae_interface_num, int32_t mtu)
{
	struct ecm_db_iface_instance *nii;
	struct ecm_db_iface_instance *ii;
	DEBUG_INFO("Establish PPPoE iface: %s with session id: %u, remote mac: %pM, MTU: %d, if num: %d, accel engine if id: %d\n",
			dev_name, type_info->pppoe_session_id, type_info->remote_mac, mtu, dev_interface_num, ae_interface_num);
	/*
	 * Locate the iface (fast path, no lock needed)
	 */
	ii = ecm_db_iface_find_and_ref_pppoe(type_info->pppoe_session_id, type_info->remote_mac);
	if (ii) {
		DEBUG_TRACE("%px: iface established\n", ii);
		return ii;
	}
	/*
	 * No iface - create one (allocation done outside the lock)
	 */
	nii = ecm_db_iface_alloc();
	if (!nii) {
		DEBUG_WARN("Failed to establish iface\n");
		return NULL;
	}
	/*
	 * Add iface into the database, atomically to avoid races creating the same thing.
	 * Re-check under the lock: if another thread inserted first, drop our
	 * unused allocation and return the winner's instance.
	 */
	spin_lock_bh(&ecm_interface_lock);
	ii = ecm_db_iface_find_and_ref_pppoe(type_info->pppoe_session_id, type_info->remote_mac);
	if (ii) {
		spin_unlock_bh(&ecm_interface_lock);
		ecm_db_iface_deref(nii);
		return ii;
	}
	ecm_db_iface_add_pppoe(nii, type_info->pppoe_session_id, type_info->remote_mac, dev_name,
			mtu, dev_interface_num, ae_interface_num, NULL, nii);
	spin_unlock_bh(&ecm_interface_lock);
	DEBUG_TRACE("%px: pppoe iface established\n", nii);
	return nii;
}
#endif
#ifdef ECM_INTERFACE_MAP_T_ENABLE
/*
 * ecm_interface_map_t_interface_establish()
 *	Returns a reference to a iface of the MAP-T type, possibly creating one if necessary.
 * Returns NULL on failure or a reference to interface.
 *
 * (Header previously said "PPPoE type" — copy/paste from the PPPoE variant.)
 * The returned instance holds a reference taken by the find/add path; the
 * caller is expected to release it with ecm_db_iface_deref().
 */
static struct ecm_db_iface_instance *ecm_interface_map_t_interface_establish(struct ecm_db_interface_info_map_t *type_info,
							char *dev_name, int32_t dev_interface_num, int32_t ae_interface_num, int32_t mtu)
{
	struct ecm_db_iface_instance *nii;
	struct ecm_db_iface_instance *ii;
	DEBUG_TRACE("Establish MAP-T iface: %s MTU: %d, if num: %d, accel engine if id: %d\n",
			dev_name, mtu, dev_interface_num, ae_interface_num);
	/*
	 * Locate the iface (fast path, no lock needed)
	 */
	ii = ecm_db_iface_find_and_ref_map_t(type_info->if_index, ae_interface_num);
	if (ii) {
		DEBUG_TRACE("%px: iface established\n", ii);
		return ii;
	}
	/*
	 * No iface - create one (allocation done outside the lock)
	 */
	nii = ecm_db_iface_alloc();
	if (!nii) {
		DEBUG_WARN("Failed to establish iface\n");
		return NULL;
	}
	/*
	 * Add iface into the database, atomically to avoid races creating the same thing.
	 * Re-check under the lock: if another thread inserted first, drop our
	 * unused allocation and return the winner's instance.
	 */
	spin_lock_bh(&ecm_interface_lock);
	ii = ecm_db_iface_find_and_ref_map_t(type_info->if_index, ae_interface_num);
	if (ii) {
		spin_unlock_bh(&ecm_interface_lock);
		ecm_db_iface_deref(nii);
		return ii;
	}
	ecm_db_iface_add_map_t(nii, type_info, dev_name,
			mtu, dev_interface_num, ae_interface_num, NULL, nii);
	spin_unlock_bh(&ecm_interface_lock);
	DEBUG_TRACE("%px: map_t iface established\n", nii);
	return nii;
}
#endif
#ifdef ECM_INTERFACE_L2TPV2_ENABLE
/*
 * ecm_interface_pppol2tpv2_interface_establish()
 *	Returns a reference to a iface of the PPPoL2TPV2 type, possibly creating one if necessary.
 * Returns NULL on failure or a reference to interface.
 *
 * When an existing iface is found, its accel engine interface identifier is
 * refreshed since it may have changed since creation. The returned instance
 * holds a reference the caller is expected to release with ecm_db_iface_deref().
 */
static struct ecm_db_iface_instance *ecm_interface_pppol2tpv2_interface_establish(struct ecm_db_interface_info_pppol2tpv2 *type_info,
							char *dev_name, int32_t dev_interface_num, int32_t ae_interface_num, int32_t mtu)
{
	struct ecm_db_iface_instance *nii;
	struct ecm_db_iface_instance *ii;
	DEBUG_INFO("Establish PPPol2tp iface: %s with tunnel id=%u session id %u\n", dev_name, type_info->l2tp.tunnel.tunnel_id,
			type_info->l2tp.session.session_id);
	/*
	 * Locate the iface (fast path, no lock needed)
	 */
	ii = ecm_db_iface_find_and_ref_pppol2tpv2(type_info->l2tp.tunnel.tunnel_id, type_info->l2tp.session.session_id);
	if (ii) {
		DEBUG_TRACE("%px: iface established\n", ii);
		ecm_db_iface_update_ae_interface_identifier(ii, ae_interface_num);
		return ii;
	}
	/*
	 * No iface - create one (allocation done outside the lock)
	 */
	nii = ecm_db_iface_alloc();
	if (!nii) {
		DEBUG_WARN("Failed to establish iface\n");
		return NULL;
	}
	/*
	 * Add iface into the database, atomically to avoid races creating the same thing.
	 * Re-check under the lock: if another thread inserted first, drop our
	 * unused allocation and return the winner's instance.
	 */
	spin_lock_bh(&ecm_interface_lock);
	ii = ecm_db_iface_find_and_ref_pppol2tpv2(type_info->l2tp.tunnel.tunnel_id, type_info->l2tp.session.session_id);
	if (ii) {
		spin_unlock_bh(&ecm_interface_lock);
		ecm_db_iface_deref(nii);
		ecm_db_iface_update_ae_interface_identifier(ii, ae_interface_num);
		return ii;
	}
	ecm_db_iface_add_pppol2tpv2(nii, type_info, dev_name, mtu, dev_interface_num, ae_interface_num, NULL, nii);
	spin_unlock_bh(&ecm_interface_lock);
	DEBUG_TRACE("%px: pppol2tpv2 iface established\n", nii);
	return nii;
}
#endif
#ifdef ECM_INTERFACE_PPTP_ENABLE
/*
 * ecm_interface_pptp_interface_establish()
 *	Returns a reference to a iface of the PPTP type, possibly creating one if necessary.
 * Returns NULL on failure or a reference to interface.
 *
 * When an existing iface is found, its accel engine interface identifier is
 * refreshed since it may have changed since creation. The returned instance
 * holds a reference the caller is expected to release with ecm_db_iface_deref().
 */
static struct ecm_db_iface_instance *ecm_interface_pptp_interface_establish(struct ecm_db_interface_info_pptp *type_info,
							char *dev_name, int32_t dev_interface_num, int32_t ae_interface_num, int32_t mtu)
{
	struct ecm_db_iface_instance *nii;
	struct ecm_db_iface_instance *ii;
	DEBUG_INFO("Establish PPTP iface: %s with local call id %u peer call id %u\n", dev_name, type_info->src_call_id,
			type_info->dst_call_id);
	/*
	 * Locate the iface (fast path, no lock needed)
	 */
	ii = ecm_db_iface_find_and_ref_pptp(type_info->src_call_id, type_info->dst_call_id, ae_interface_num);
	if (ii) {
		DEBUG_TRACE("%px: iface established\n", ii);
		ecm_db_iface_update_ae_interface_identifier(ii, ae_interface_num);
		return ii;
	}
	/*
	 * No iface - create one (allocation done outside the lock)
	 */
	nii = ecm_db_iface_alloc();
	if (!nii) {
		DEBUG_WARN("Failed to establish iface\n");
		return NULL;
	}
	/*
	 * Add iface into the database, atomically to avoid races creating the same thing.
	 * Re-check under the lock: if another thread inserted first, drop our
	 * unused allocation and return the winner's instance.
	 */
	spin_lock_bh(&ecm_interface_lock);
	ii = ecm_db_iface_find_and_ref_pptp(type_info->src_call_id, type_info->dst_call_id, ae_interface_num);
	if (ii) {
		spin_unlock_bh(&ecm_interface_lock);
		ecm_db_iface_deref(nii);
		ecm_db_iface_update_ae_interface_identifier(ii, ae_interface_num);
		return ii;
	}
	ecm_db_iface_add_pptp(nii, type_info, dev_name, mtu, dev_interface_num, ae_interface_num, NULL, nii);
	spin_unlock_bh(&ecm_interface_lock);
	DEBUG_TRACE("%px: pptp iface established\n", nii);
	return nii;
}
#endif
#ifdef ECM_INTERFACE_GRE_TUN_ENABLE
/*
 * ecm_interface_gre_tun_interface_establish()
 *	Returns a reference to a iface of the gre type, possibly creating one if necessary.
 * Returns NULL on failure or a reference to interface.
 *
 * The returned instance holds a reference taken by the find/add path; the
 * caller is expected to release it with ecm_db_iface_deref().
 */
static struct ecm_db_iface_instance *ecm_interface_gre_tun_interface_establish(struct ecm_db_interface_info_gre_tun *type_info,
							char *dev_name, int32_t dev_interface_num, int32_t ae_interface_num, int32_t mtu)
{
	struct ecm_db_iface_instance *nii;
	struct ecm_db_iface_instance *ii;
	DEBUG_TRACE("Establish GRE TUN iface: %s MTU: %d, if num: %d, accel engine if id: %d\n",
			dev_name, mtu, dev_interface_num, ae_interface_num);
	/*
	 * Locate the iface (fast path, no lock needed)
	 */
	ii = ecm_db_iface_find_and_ref_gre_tun(type_info->if_index, ae_interface_num);
	if (ii) {
		DEBUG_TRACE("%px: iface established\n", ii);
		return ii;
	}
	/*
	 * No iface - create one (allocation done outside the lock)
	 */
	nii = ecm_db_iface_alloc();
	if (!nii) {
		DEBUG_WARN("Failed to establish iface\n");
		return NULL;
	}
	/*
	 * Add iface into the database, atomically to avoid races creating the same thing.
	 * Re-check under the lock: if another thread inserted first, drop our
	 * unused allocation and return the winner's instance.
	 */
	spin_lock_bh(&ecm_interface_lock);
	ii = ecm_db_iface_find_and_ref_gre_tun(type_info->if_index, ae_interface_num);
	if (ii) {
		spin_unlock_bh(&ecm_interface_lock);
		ecm_db_iface_deref(nii);
		return ii;
	}
	ecm_db_iface_add_gre_tun(nii, type_info, dev_name,
			mtu, dev_interface_num, ae_interface_num, NULL, nii);
	spin_unlock_bh(&ecm_interface_lock);
	DEBUG_TRACE("%px: gre iface established\n", nii);
	return nii;
}
#endif
/*
 * ecm_interface_unknown_interface_establish()
 *	Returns a reference to a iface of the UNKNOWN type, possibly creating one if necessary.
 * Returns NULL on failure or a reference to interface.
 *
 * The returned instance holds a reference taken by the find/add path; the
 * caller is expected to release it with ecm_db_iface_deref().
 */
static struct ecm_db_iface_instance *ecm_interface_unknown_interface_establish(struct ecm_db_interface_info_unknown *type_info,
							char *dev_name, int32_t dev_interface_num, int32_t ae_interface_num, int32_t mtu)
{
	struct ecm_db_iface_instance *nii;
	struct ecm_db_iface_instance *ii;
	DEBUG_INFO("Establish UNKNOWN iface: %s with os_specific_ident: %u, MTU: %d, if num: %d, accel engine if id: %d\n",
			dev_name, type_info->os_specific_ident, mtu, dev_interface_num, ae_interface_num);
	/*
	 * Locate the iface (fast path, no lock needed)
	 */
	ii = ecm_db_iface_find_and_ref_unknown(type_info->os_specific_ident);
	if (ii) {
		DEBUG_TRACE("%px: iface established\n", ii);
		return ii;
	}
	/*
	 * No iface - create one (allocation done outside the lock)
	 */
	nii = ecm_db_iface_alloc();
	if (!nii) {
		DEBUG_WARN("Failed to establish iface\n");
		return NULL;
	}
	/*
	 * Add iface into the database, atomically to avoid races creating the same thing.
	 * Re-check under the lock: if another thread inserted first, drop our
	 * unused allocation and return the winner's instance.
	 */
	spin_lock_bh(&ecm_interface_lock);
	ii = ecm_db_iface_find_and_ref_unknown(type_info->os_specific_ident);
	if (ii) {
		spin_unlock_bh(&ecm_interface_lock);
		ecm_db_iface_deref(nii);
		return ii;
	}
	ecm_db_iface_add_unknown(nii, type_info->os_specific_ident, dev_name,
			mtu, dev_interface_num, ae_interface_num, NULL, nii);
	spin_unlock_bh(&ecm_interface_lock);
	DEBUG_TRACE("%px: unknown iface established\n", nii);
	return nii;
}
/*
 * ecm_interface_loopback_interface_establish()
 *	Returns a reference to a iface of the LOOPBACK type, possibly creating one if necessary.
 * Returns NULL on failure or a reference to interface.
 *
 * The returned instance holds a reference taken by the find/add path; the
 * caller is expected to release it with ecm_db_iface_deref().
 */
static struct ecm_db_iface_instance *ecm_interface_loopback_interface_establish(struct ecm_db_interface_info_loopback *type_info,
							char *dev_name, int32_t dev_interface_num, int32_t ae_interface_num, int32_t mtu)
{
	struct ecm_db_iface_instance *nii;
	struct ecm_db_iface_instance *ii;
	DEBUG_INFO("Establish LOOPBACK iface: %s with os_specific_ident: %u, MTU: %d, if num: %d, accel engine if id: %d\n",
			dev_name, type_info->os_specific_ident, mtu, dev_interface_num, ae_interface_num);
	/*
	 * Locate the iface (fast path, no lock needed)
	 */
	ii = ecm_db_iface_find_and_ref_loopback(type_info->os_specific_ident);
	if (ii) {
		DEBUG_TRACE("%px: iface established\n", ii);
		return ii;
	}
	/*
	 * No iface - create one (allocation done outside the lock)
	 */
	nii = ecm_db_iface_alloc();
	if (!nii) {
		DEBUG_WARN("Failed to establish iface\n");
		return NULL;
	}
	/*
	 * Add iface into the database, atomically to avoid races creating the same thing.
	 * Re-check under the lock: if another thread inserted first, drop our
	 * unused allocation and return the winner's instance.
	 */
	spin_lock_bh(&ecm_interface_lock);
	ii = ecm_db_iface_find_and_ref_loopback(type_info->os_specific_ident);
	if (ii) {
		spin_unlock_bh(&ecm_interface_lock);
		ecm_db_iface_deref(nii);
		return ii;
	}
	ecm_db_iface_add_loopback(nii, type_info->os_specific_ident, dev_name,
			mtu, dev_interface_num, ae_interface_num, NULL, nii);
	spin_unlock_bh(&ecm_interface_lock);
	DEBUG_TRACE("%px: loopback iface established\n", nii);
	return nii;
}
#ifdef ECM_INTERFACE_IPSEC_ENABLE
/*
 * ecm_interface_ipsec_tunnel_interface_establish()
 *	Returns a reference to a iface of the IPSEC_TUNNEL type, possibly creating one if necessary.
 * Returns NULL on failure or a reference to interface.
 *
 * The returned instance holds a reference taken by the find/add path; the
 * caller is expected to release it with ecm_db_iface_deref().
 *
 * NOTE: GGG TODO THIS NEEDS TO TAKE A PROPER APPROACH TO IPSEC TUNNELS USING ENDPOINT ADDRESSING AS THE TYPE INFO KEYS
 */
static struct ecm_db_iface_instance *ecm_interface_ipsec_tunnel_interface_establish(struct ecm_db_interface_info_ipsec_tunnel *type_info,
							char *dev_name, int32_t dev_interface_num, int32_t ae_interface_num, int32_t mtu)
{
	struct ecm_db_iface_instance *nii;
	struct ecm_db_iface_instance *ii;
	DEBUG_INFO("Establish IPSEC_TUNNEL iface: %s with os_specific_ident: %u, MTU: %d, if num: %d, accel engine if id: %d\n",
			dev_name, type_info->os_specific_ident, mtu, dev_interface_num, ae_interface_num);
	/*
	 * Locate the iface (fast path, no lock needed)
	 */
	ii = ecm_db_iface_find_and_ref_ipsec_tunnel(type_info->os_specific_ident, ae_interface_num);
	if (ii) {
		DEBUG_TRACE("%px: iface established\n", ii);
		return ii;
	}
	/*
	 * No iface - create one (allocation done outside the lock)
	 */
	nii = ecm_db_iface_alloc();
	if (!nii) {
		DEBUG_WARN("Failed to establish iface\n");
		return NULL;
	}
	/*
	 * Add iface into the database, atomically to avoid races creating the same thing.
	 * Re-check under the lock: if another thread inserted first, drop our
	 * unused allocation and return the winner's instance.
	 */
	spin_lock_bh(&ecm_interface_lock);
	ii = ecm_db_iface_find_and_ref_ipsec_tunnel(type_info->os_specific_ident, ae_interface_num);
	if (ii) {
		spin_unlock_bh(&ecm_interface_lock);
		ecm_db_iface_deref(nii);
		return ii;
	}
	ecm_db_iface_add_ipsec_tunnel(nii, type_info->os_specific_ident, dev_name,
			mtu, dev_interface_num, ae_interface_num, NULL, nii);
	spin_unlock_bh(&ecm_interface_lock);
	DEBUG_TRACE("%px: ipsec_tunnel iface established\n", nii);
	return nii;
}
#endif
#ifdef ECM_INTERFACE_SIT_ENABLE
#ifdef CONFIG_IPV6_SIT_6RD
/*
 * ecm_interface_sit_interface_establish()
 *	Returns a reference to a iface of the SIT type, possibly creating one if necessary.
 * Returns NULL on failure or a reference to interface.
 *
 * The returned instance holds a reference taken by the find/add path; the
 * caller is expected to release it with ecm_db_iface_deref().
 */
static struct ecm_db_iface_instance *ecm_interface_sit_interface_establish(struct ecm_db_interface_info_sit *type_info,
							char *dev_name, int32_t dev_interface_num, int32_t ae_interface_num, int32_t mtu)
{
	struct ecm_db_iface_instance *nii;
	struct ecm_db_iface_instance *ii;
	DEBUG_INFO("Establish SIT iface: %s with saddr: " ECM_IP_ADDR_OCTAL_FMT ", daddr: " ECM_IP_ADDR_OCTAL_FMT ", MTU: %d, if num: %d, accel engine if id: %d\n",
			dev_name, ECM_IP_ADDR_TO_OCTAL(type_info->saddr), ECM_IP_ADDR_TO_OCTAL(type_info->daddr), mtu, dev_interface_num, ae_interface_num);
	/*
	 * Locate the iface (fast path, no lock needed)
	 */
	ii = ecm_db_iface_find_and_ref_sit(type_info->saddr, type_info->daddr, ae_interface_num);
	if (ii) {
		DEBUG_TRACE("%px: iface established\n", ii);
		return ii;
	}
	/*
	 * No iface - create one (allocation done outside the lock)
	 */
	nii = ecm_db_iface_alloc();
	if (!nii) {
		DEBUG_WARN("Failed to establish iface\n");
		return NULL;
	}
	/*
	 * Add iface into the database, atomically to avoid races creating the same thing.
	 * Re-check under the lock: if another thread inserted first, drop our
	 * unused allocation and return the winner's instance.
	 */
	spin_lock_bh(&ecm_interface_lock);
	ii = ecm_db_iface_find_and_ref_sit(type_info->saddr, type_info->daddr, ae_interface_num);
	if (ii) {
		spin_unlock_bh(&ecm_interface_lock);
		ecm_db_iface_deref(nii);
		return ii;
	}
	ecm_db_iface_add_sit(nii, type_info, dev_name, mtu, dev_interface_num,
			ae_interface_num, NULL, nii);
	spin_unlock_bh(&ecm_interface_lock);
	DEBUG_TRACE("%px: sit iface established\n", nii);
	return nii;
}
#endif
#endif
#ifdef ECM_INTERFACE_TUNIPIP6_ENABLE
#ifdef ECM_IPV6_ENABLE
/*
 * ecm_interface_tunipip6_interface_establish()
 *	Returns a reference to a iface of the TUNIPIP6 type, possibly creating one if necessary.
 * Returns NULL on failure or a reference to interface.
 *
 * The returned instance holds a reference taken by the find/add path; the
 * caller is expected to release it with ecm_db_iface_deref().
 */
static struct ecm_db_iface_instance *ecm_interface_tunipip6_interface_establish(struct ecm_db_interface_info_tunipip6 *type_info,
							char *dev_name, int32_t dev_interface_num, int32_t ae_interface_num, int32_t mtu)
{
	struct ecm_db_iface_instance *nii;
	struct ecm_db_iface_instance *ii;
	DEBUG_INFO("Establish TUNIPIP6 iface: %s with saddr: " ECM_IP_ADDR_OCTAL_FMT ", daddr: " ECM_IP_ADDR_OCTAL_FMT ", MTU: %d, if num: %d, accel engine if id: %d\n",
			dev_name, ECM_IP_ADDR_TO_OCTAL(type_info->saddr), ECM_IP_ADDR_TO_OCTAL(type_info->daddr), mtu, dev_interface_num, ae_interface_num);
	/*
	 * Locate the iface (fast path, no lock needed)
	 */
	ii = ecm_db_iface_find_and_ref_tunipip6(type_info->saddr, type_info->daddr, ae_interface_num);
	if (ii) {
		DEBUG_TRACE("%px: iface established\n", ii);
		return ii;
	}
	/*
	 * No iface - create one (allocation done outside the lock)
	 */
	nii = ecm_db_iface_alloc();
	if (!nii) {
		DEBUG_WARN("Failed to establish iface\n");
		return NULL;
	}
	/*
	 * Add iface into the database, atomically to avoid races creating the same thing.
	 * Re-check under the lock: if another thread inserted first, drop our
	 * unused allocation and return the winner's instance.
	 */
	spin_lock_bh(&ecm_interface_lock);
	ii = ecm_db_iface_find_and_ref_tunipip6(type_info->saddr, type_info->daddr, ae_interface_num);
	if (ii) {
		spin_unlock_bh(&ecm_interface_lock);
		ecm_db_iface_deref(nii);
		return ii;
	}
	ecm_db_iface_add_tunipip6(nii, type_info, dev_name, mtu, dev_interface_num,
			ae_interface_num, NULL, nii);
	spin_unlock_bh(&ecm_interface_lock);
	DEBUG_TRACE("%px: tunipip6 iface established\n", nii);
	return nii;
}
#endif
#endif
#ifdef ECM_INTERFACE_RAWIP_ENABLE
/*
 * ecm_interface_rawip_interface_establish()
 *	Returns a reference to a iface of the RAWIP type, possibly creating one if necessary.
 * Returns NULL on failure or a reference to interface.
 */
static struct ecm_db_iface_instance *ecm_interface_rawip_interface_establish(struct ecm_db_interface_info_rawip *type_info,
							char *dev_name, int32_t dev_interface_num, int32_t ae_interface_num, int32_t mtu)
{
	struct ecm_db_iface_instance *existing;
	struct ecm_db_iface_instance *created;

	DEBUG_INFO("Establish RAWIP iface: %s with address: %pM, MTU: %d, if num: %d, accel engine if id: %d\n",
			dev_name, type_info->address, mtu, dev_interface_num, ae_interface_num);

	/*
	 * Fast path: the interface may already be in the database.
	 */
	existing = ecm_db_iface_find_and_ref_rawip(type_info->address);
	if (existing) {
		DEBUG_TRACE("%px: RAWIP iface already established\n", existing);
		return existing;
	}

	/*
	 * Not found - allocate a candidate instance before taking the lock.
	 */
	created = ecm_db_iface_alloc();
	if (!created) {
		DEBUG_WARN("Failed to establish RAWIP iface\n");
		return NULL;
	}

	/*
	 * Re-check under the lock so a concurrent caller cannot insert a duplicate;
	 * if someone beat us to it, drop our candidate and return theirs.
	 */
	spin_lock_bh(&ecm_interface_lock);
	existing = ecm_db_iface_find_and_ref_rawip(type_info->address);
	if (existing) {
		spin_unlock_bh(&ecm_interface_lock);
		ecm_db_iface_deref(created);
		return existing;
	}
	ecm_db_iface_add_rawip(created, type_info->address, dev_name,
			mtu, dev_interface_num, ae_interface_num, NULL, created);
	spin_unlock_bh(&ecm_interface_lock);

	DEBUG_TRACE("%px: RAWIP iface established\n", created);
	return created;
}
#endif
#ifdef ECM_INTERFACE_OVPN_ENABLE
/*
 * ecm_interface_ovpn_interface_establish()
 *	Returns reference to iface of the OVPN type.
 */
static struct ecm_db_iface_instance *ecm_interface_ovpn_interface_establish(struct ecm_db_interface_info_ovpn *type_info,
							char *dev_name, int32_t dev_interface_num, int32_t mtu)
{
	struct ecm_db_iface_instance *existing;
	struct ecm_db_iface_instance *created;

	DEBUG_INFO("Establish OVPN iface: %s with ae_interface_num : %d, MTU: %d, if num: %d\n",
			dev_name, type_info->tun_ifnum, mtu, dev_interface_num);

	/*
	 * Fast path: the interface may already be in the database.
	 */
	existing = ecm_db_iface_find_and_ref_ovpn(type_info->tun_ifnum);
	if (existing) {
		DEBUG_TRACE("%px: iface established\n", existing);
		return existing;
	}

	/*
	 * Not found - allocate a candidate instance before taking the lock.
	 */
	created = ecm_db_iface_alloc();
	if (!created) {
		DEBUG_WARN("Failed to establish iface\n");
		return NULL;
	}

	/*
	 * Re-check under the lock so a concurrent caller cannot insert a duplicate;
	 * if someone beat us to it, drop our candidate and return theirs.
	 */
	spin_lock_bh(&ecm_interface_lock);
	existing = ecm_db_iface_find_and_ref_ovpn(type_info->tun_ifnum);
	if (existing) {
		spin_unlock_bh(&ecm_interface_lock);
		ecm_db_iface_deref(created);
		return existing;
	}
	ecm_db_iface_add_ovpn(created, type_info, dev_name, mtu, dev_interface_num, NULL, created);
	spin_unlock_bh(&ecm_interface_lock);

	DEBUG_TRACE("%px: ovpn iface established\n", created);
	return created;
}
#endif
#ifdef ECM_INTERFACE_VXLAN_ENABLE
/*
 * ecm_interface_vxlan_interface_establish()
 *	Returns a reference to a iface of the VxLAN type, possibly creating one if necessary.
 * Returns NULL on failure or a reference to interface.
 */
static struct ecm_db_iface_instance *ecm_interface_vxlan_interface_establish(struct ecm_db_interface_info_vxlan *type_info,
							char *dev_name, int32_t dev_interface_num, int32_t ae_interface_num, int32_t mtu)
{
	struct ecm_db_iface_instance *existing;
	struct ecm_db_iface_instance *created;

	DEBUG_INFO("Establish VxLAN iface: %s with vxlan id: %u, MTU: %d, if num: %d, if_type: %d, accel engine if id: %d\n",
			dev_name, type_info->vni, mtu, dev_interface_num, type_info->if_type, ae_interface_num);

	/*
	 * Fast path: the interface may already be in the database.
	 */
	existing = ecm_db_iface_find_and_ref_vxlan(type_info->vni, type_info->if_type);
	if (existing) {
		DEBUG_TRACE("%px: vxlan iface established\n", existing);
		/*
		 * Update the accel engine interface identifier, just in case it was changed.
		 */
		ecm_db_iface_update_ae_interface_identifier(existing, ae_interface_num);
		return existing;
	}

	/*
	 * Not found - allocate a candidate instance before taking the lock.
	 */
	created = ecm_db_iface_alloc();
	if (!created) {
		DEBUG_WARN("Failed to establish iface\n");
		return NULL;
	}

	/*
	 * Re-check under the lock so a concurrent caller cannot insert a duplicate;
	 * if someone beat us to it, drop our candidate, refresh the winner's accel
	 * engine identifier and return it.
	 */
	spin_lock_bh(&ecm_interface_lock);
	existing = ecm_db_iface_find_and_ref_vxlan(type_info->vni, type_info->if_type);
	if (existing) {
		spin_unlock_bh(&ecm_interface_lock);
		ecm_db_iface_deref(created);
		ecm_db_iface_update_ae_interface_identifier(existing, ae_interface_num);
		DEBUG_TRACE("%px: vxlan iface established\n", existing);
		return existing;
	}
	ecm_db_iface_add_vxlan(created, type_info->vni, type_info->if_type, dev_name, mtu,
			dev_interface_num, ae_interface_num, NULL, created);
	spin_unlock_bh(&ecm_interface_lock);

	DEBUG_TRACE("%px: vxlan iface established\n", created);
	return created;
}
#endif
/*
 * ecm_interface_tunnel_mtu_update()
 *	Update mtu if the flow is a tunneled packet.
 *
 * Looks up the local net devices owning saddr/daddr and, for recognised
 * tunnel interface types, reports the owning device's MTU through *mtu.
 *
 * Returns true if *mtu was updated, false otherwise (no matching local
 * device, or a type that needs no MTU override).
 *
 * Both address lookups return a held net_device reference; all exits go
 * through 'done' so the references are always released.
 */
bool ecm_interface_tunnel_mtu_update(ip_addr_t saddr, ip_addr_t daddr, ecm_db_iface_type_t type, int32_t *mtu)
{
	struct net_device *src_dev;
	struct net_device *dest_dev;
	bool ret = true;

	/*
	 * Check if source/destination IPs are local addresses.
	 */
	src_dev = ecm_interface_dev_find_by_local_addr(saddr);
	dest_dev = ecm_interface_dev_find_by_local_addr(daddr);

	switch (type) {
	case ECM_DB_IFACE_TYPE_OVPN:
		/*
		 * OVPN may be local at either end, so fall back to the
		 * destination device when the source is not local.
		 */
		if (src_dev) {
			*mtu = src_dev->mtu;
		} else if (dest_dev) {
			*mtu = dest_dev->mtu;
		} else {
			/*
			 * Exit via the common cleanup path for consistency with the
			 * other cases (previously a bare 'return false' - safe only
			 * because both lookups had failed, but fragile).
			 */
			ret = false;
			goto done;
		}
		break;
	case ECM_DB_IFACE_TYPE_PPTP:
	case ECM_DB_IFACE_TYPE_PPPOL2TPV2:
	case ECM_DB_IFACE_TYPE_GRE_TUN:
	case ECM_DB_IFACE_TYPE_GRE_TAP:
	case ECM_DB_IFACE_TYPE_VXLAN:
	case ECM_DB_IFACE_TYPE_IPSEC_TUNNEL:
		/*
		 * These tunnels only take the MTU from the local source device.
		 */
		if (src_dev) {
			*mtu = src_dev->mtu;
		} else {
			ret = false;
			goto done;
		}
		break;
	default:
		ret = false;
		DEBUG_WARN("Tunnel type doesn't need to update MTU value\n");
		break;
	}
done:
	if (src_dev) {
		dev_put(src_dev);
	}
	if (dest_dev) {
		dev_put(dest_dev);
	}
	return ret;
}
#ifdef ECM_INTERFACE_OVS_BRIDGE_ENABLE
/*
 * ecm_interface_ovs_bridge_port_dev_get_and_ref()
 *	Looks up the slave port in the bridge devices port list.
 *
 * Builds an OVS datapath flow tuple from the supplied addresses/ports (or
 * from the explicit OVS params 'op' when given) and asks the OVS manager
 * for the egress port. If the forward lookup fails, retries with a MAC
 * lookup - directly for multicast flows, otherwise with the reversed
 * (return) flow tuple.
 *
 * Returns a held net_device reference on success, NULL on failure.
 * 'layer4hdr' must point at the TCP/UDP header for the respective protocols.
 */
static struct net_device *ecm_interface_ovs_bridge_port_dev_get_and_ref(struct sk_buff *skb, struct net_device *br_dev,
								ip_addr_t src_ip, ip_addr_t dst_ip, int ip_version,
								int protocol, bool is_routed, uint8_t *smac,
								uint8_t *dmac, __be16 *layer4hdr,
								struct ecm_front_end_ovs_params *op)
{
	struct ovsmgr_dp_flow flow;
	struct ovsmgr_dp_flow return_flow;
	struct net_device *dev;

	memset(&flow, 0, sizeof(flow));
	flow.indev = br_dev;
	flow.outdev = NULL;
	flow.tuple.ip_version = ip_version;
	flow.tuple.protocol = protocol;
	flow.is_routed = is_routed;

	/*
	 * NOTE(review): 'smac' is only tested for NULL, never copied; when it is
	 * non-NULL, flow.smac stays all-zero from the memset (unless the multicast
	 * path below overwrites it). Confirm this is intentional.
	 */
	if (!smac) {
		ether_addr_copy(flow.smac, br_dev->dev_addr);
	}
	ether_addr_copy(flow.dmac, dmac);

	/*
	 * Consider a routing flow
	 * eth1-ovsbr1----->ovsbr2-eth2
	 * The 2 ovs data path rules should look like the following,
	 * 1. ingress port:eth1, egress_port:ovsbr1
	 * 2. ingress_port:ovsbr2, egress_port:eth2
	 *
	 * Copy the multicast mac address, if src_ip is multicast.
	 * During multicast 'from' hierarchy creation, the ECM
	 * copies the source MAC as multicast MAC as the
	 * reverse direction rule is not present in the ovs
	 * data path rule set.
	 */
	if (ecm_ip_addr_is_multicast(src_ip)) {
		struct ethhdr *skb_eth_hdr;

		skb_eth_hdr = eth_hdr(skb);
		ether_addr_copy(flow.smac, dmac);
		ether_addr_copy(flow.dmac, skb_eth_hdr->h_dest);

		/*
		 * Multicast acceleration is UDP-only here.
		 */
		if (protocol == IPPROTO_UDP) {
			struct udphdr *udp_hdr = (struct udphdr *)layer4hdr;
			flow.tuple.src_port = udp_hdr->source;
			flow.tuple.dst_port = udp_hdr->dest;
		} else {
			DEBUG_WARN("%px: Protocol is not UDP\n", skb);
			return NULL;
		}

		/*
		 * Note the deliberate swap: dst_ip becomes the tuple source (see
		 * the comment above about the missing reverse datapath rule).
		 */
		if (ip_version == 4) {
			ECM_IP_ADDR_TO_NIN4_ADDR(flow.tuple.ipv4.src, dst_ip);
			ECM_IP_ADDR_TO_NIN4_ADDR(flow.tuple.ipv4.dst, src_ip);
			DEBUG_TRACE("%px: br_dev: %s, src_addr: %pI4, dest_addr: %pI4, ip_version: %d, protocol: %d (sp:%d, dp:%d) (smac:%pM, dmac:%pM)\n",
				    skb, br_dev->name, &flow.tuple.ipv4.src, &flow.tuple.ipv4.dst,
				    ip_version, protocol, flow.tuple.src_port, flow.tuple.dst_port, flow.smac, flow.dmac);
		} else {
			ECM_IP_ADDR_TO_NIN6_ADDR(flow.tuple.ipv6.src, dst_ip);
			ECM_IP_ADDR_TO_NIN6_ADDR(flow.tuple.ipv6.dst, src_ip);
			DEBUG_TRACE("%px: br_dev: %s, src_addr: %pI6, dest_addr: %pI6, ip_version: %d, protocol: %d (sp:%d, dp:%d) (smac:%pM, dmac:%pM)\n",
				    skb, br_dev->name, &flow.tuple.ipv6.src, &flow.tuple.ipv6.dst,
				    ip_version, protocol, flow.tuple.src_port, flow.tuple.dst_port, flow.smac, flow.dmac);
		}
		goto port_find;
	}

	/*
	 * OVS parameters are not passed explicitly for the following cases:
	 * 1. IPv6 flows
	 * 2. IPv4/IPv6 non-ported flows
	 * 3. Multicast flows.
	 * 4. SFE flows
	 */
	if (!op) {
		/*
		 * Take the ports straight from the packet's L4 header.
		 */
		if (protocol == IPPROTO_TCP) {
			struct tcphdr *tcp_hdr = (struct tcphdr *)layer4hdr;
			flow.tuple.src_port = tcp_hdr->source;
			flow.tuple.dst_port = tcp_hdr->dest;
		} else if (protocol == IPPROTO_UDP) {
			struct udphdr *udp_hdr = (struct udphdr *)layer4hdr;
			flow.tuple.src_port = udp_hdr->source;
			flow.tuple.dst_port = udp_hdr->dest;
		} else {
			DEBUG_WARN("%px: Protocol is not udp/tcp\n", skb);
			return NULL;
		}

		if (ip_version == 4) {
			ECM_IP_ADDR_TO_NIN4_ADDR(flow.tuple.ipv4.src, src_ip);
			ECM_IP_ADDR_TO_NIN4_ADDR(flow.tuple.ipv4.dst, dst_ip);
			DEBUG_TRACE("%px: br_dev: %s, src_addr: %pI4, dest_addr: %pI4, ip_version: %d, protocol: %d (sp:%d, dp:%d) (smac:%pM, dmac:%pM)\n",
				    skb, br_dev->name, &flow.tuple.ipv4.src, &flow.tuple.ipv4.dst,
				    ip_version, protocol, flow.tuple.src_port, flow.tuple.dst_port, flow.smac, flow.dmac);
		} else {
			ECM_IP_ADDR_TO_NIN6_ADDR(flow.tuple.ipv6.src, src_ip);
			ECM_IP_ADDR_TO_NIN6_ADDR(flow.tuple.ipv6.dst, dst_ip);
			DEBUG_TRACE("%px: br_dev: %s, src_addr: %pI6, dest_addr: %pI6, ip_version: %d, protocol: %d (sp:%d, dp:%d) (smac:%pM, dmac:%pM)\n",
				    skb, br_dev->name, &flow.tuple.ipv6.src, &flow.tuple.ipv6.dst,
				    ip_version, protocol, flow.tuple.src_port, flow.tuple.dst_port, flow.smac, flow.dmac);
		}
		goto port_find;
	}

	/*
	 * We use OVS params for IPv4 NSS unicast flows.
	 * Ports in 'op' are host order and must be converted to network order.
	 */
	if ((protocol == IPPROTO_TCP) || (protocol == IPPROTO_UDP)) {
		flow.tuple.src_port = htons(op->src_port);
		flow.tuple.dst_port = htons(op->dest_port);
	} else {
		DEBUG_WARN("%px: Protocol is not udp/tcp\n", skb);
		return NULL;
	}

	if (ip_version == 4) {
		ECM_IP_ADDR_TO_NIN4_ADDR(flow.tuple.ipv4.src, op->src_ip);
		ECM_IP_ADDR_TO_NIN4_ADDR(flow.tuple.ipv4.dst, op->dest_ip);
		DEBUG_TRACE("%px: br_dev: %s, src_addr: %pI4, dest_addr: %pI4, ip_version: %d, protocol: %d (sp:%d, dp:%d) (smac:%pM, dmac:%pM)\n",
			    skb, br_dev->name, &flow.tuple.ipv4.src, &flow.tuple.ipv4.dst,
			    ip_version, protocol, flow.tuple.src_port, flow.tuple.dst_port, flow.smac, flow.dmac);
	} else {
		ECM_IP_ADDR_TO_NIN6_ADDR(flow.tuple.ipv6.src, op->src_ip);
		ECM_IP_ADDR_TO_NIN6_ADDR(flow.tuple.ipv6.dst, op->dest_ip);
		DEBUG_TRACE("%px: br_dev: %s, src_addr: %pI6, dest_addr: %pI6, ip_version: %d, protocol: %d (sp:%d, dp:%d) (smac:%pM, dmac:%pM)\n",
			    skb, br_dev->name, &flow.tuple.ipv6.src, &flow.tuple.ipv6.dst,
			    ip_version, protocol, flow.tuple.src_port, flow.tuple.dst_port, flow.smac, flow.dmac);
	}

port_find:
	dev = ovsmgr_port_find(skb, br_dev, &flow);
	if (dev) {
		DEBUG_TRACE("OVS egress port dev: %s\n", dev->name);
		dev_hold(dev);
		return dev;
	}

	/*
	 * Handle Multicast flows separately.
	 */
	if (ecm_ip_addr_is_multicast(src_ip)) {
		dev = ovsmgr_port_find_by_mac(skb, br_dev, &flow);
		if (!dev) {
			DEBUG_WARN("%px: Couldn't find OVS bridge port for Multicast flow.\n", skb);
			return NULL;
		}
		dev_hold(dev);
		return dev;
	}

	/*
	 * Find by MAC addresses using return flow.
	 * Zero the structure first so any fields not explicitly assigned below
	 * are deterministic rather than stack garbage (the forward 'flow' is
	 * memset the same way).
	 */
	memset(&return_flow, 0, sizeof(return_flow));
	return_flow.indev = NULL;
	return_flow.outdev = br_dev;
	return_flow.tuple.ip_version = flow.tuple.ip_version;
	return_flow.tuple.protocol = flow.tuple.protocol;
	return_flow.is_routed = flow.is_routed;
	ether_addr_copy(return_flow.smac, flow.dmac);
	ether_addr_copy(return_flow.dmac, flow.smac);
	return_flow.tuple.src_port = flow.tuple.dst_port;
	return_flow.tuple.dst_port = flow.tuple.src_port;
	if (ip_version == 4) {
		return_flow.tuple.ipv4.src = flow.tuple.ipv4.dst;
		return_flow.tuple.ipv4.dst = flow.tuple.ipv4.src;
		DEBUG_TRACE("%px: br_dev = %s, src_addr: %pI4, dest_addr: %pI4, ip_version: %d, protocol: %d (sp:%d, dp:%d) (smac:%pM, dmac:%pM)\n",
			    skb, br_dev->name, &return_flow.tuple.ipv4.src,
			    &return_flow.tuple.ipv4.dst, return_flow.tuple.ip_version,
			    return_flow.tuple.protocol, return_flow.tuple.src_port, return_flow.tuple.dst_port,
			    return_flow.smac, return_flow.dmac);
	} else {
		memcpy(&return_flow.tuple.ipv6.src, &flow.tuple.ipv6.dst, sizeof(return_flow.tuple.ipv6.src));
		memcpy(&return_flow.tuple.ipv6.dst, &flow.tuple.ipv6.src, sizeof(return_flow.tuple.ipv6.dst));
		/*
		 * Fixed: this trace previously passed the ipv4 tuple members with
		 * %pI6, which is only correct by accident if ipv4/ipv6 alias in a
		 * union and reads out of bounds otherwise.
		 */
		DEBUG_TRACE("%px: br_dev = %s, src_addr: %pI6, dest_addr: %pI6, ip_version: %d, protocol: %d (sp:%d, dp:%d) (smac:%pM, dmac:%pM)\n",
			    skb, br_dev->name, &return_flow.tuple.ipv6.src,
			    &return_flow.tuple.ipv6.dst, return_flow.tuple.ip_version,
			    return_flow.tuple.protocol, return_flow.tuple.src_port, return_flow.tuple.dst_port,
			    return_flow.smac, return_flow.dmac);
	}

	dev = ovsmgr_port_find_by_mac(skb, br_dev, &return_flow);
	if (!dev) {
		DEBUG_WARN("%px: Couldn't find OVS bridge port\n", skb);
		return NULL;
	}
	dev_hold(dev);
	return dev;
}
#endif
#ifdef ECM_INTERFACE_MACVLAN_ENABLE
/*
 * ecm_interface_macvlan_mode_is_valid()
 *	Check if the macvlan interface allowed for acceleration.
 */
static bool ecm_interface_macvlan_mode_is_valid(struct net_device *dev)
{
	enum macvlan_mode mode = macvlan_get_mode(dev);

	/*
	 * Only the "Private" mode is accelerated; reject everything else.
	 */
	if (mode != MACVLAN_MODE_PRIVATE) {
		DEBUG_WARN("%px: MACVLAN dev: %s, MACVLAN mode: %d is not supported for acceleration\n", dev,
			   dev->name, mode);
		return false;
	}

	return true;
}
#endif
/*
* ecm_interface_establish_and_ref()
* Establish an interface instance for the given interface detail.
*/
struct ecm_db_iface_instance *ecm_interface_establish_and_ref(struct ecm_front_end_connection_instance *feci,
struct net_device *dev, struct sk_buff *skb)
{
int32_t dev_interface_num;
char *dev_name;
int32_t dev_type;
int32_t dev_mtu;
int32_t ae_interface_num;
struct ecm_db_iface_instance *ii;
int32_t interface_type __attribute__((unused));
union {
struct ecm_db_interface_info_ethernet ethernet; /* type == ECM_DB_IFACE_TYPE_ETHERNET */
#ifdef ECM_INTERFACE_VLAN_ENABLE
struct ecm_db_interface_info_vlan vlan; /* type == ECM_DB_IFACE_TYPE_VLAN */
#endif
#ifdef ECM_INTERFACE_MACVLAN_ENABLE
struct ecm_db_interface_info_macvlan macvlan; /* type == ECM_DB_IFACE_TYPE_MACVLAN */
#endif
#ifdef ECM_INTERFACE_BOND_ENABLE
struct ecm_db_interface_info_lag lag; /* type == ECM_DB_IFACE_TYPE_LAG */
#endif
struct ecm_db_interface_info_bridge bridge; /* type == ECM_DB_IFACE_TYPE_BRIDGE */
#ifdef ECM_INTERFACE_PPPOE_ENABLE
struct ecm_db_interface_info_pppoe pppoe; /* type == ECM_DB_IFACE_TYPE_PPPOE */
#endif
#ifdef ECM_INTERFACE_L2TPV2_ENABLE
struct ecm_db_interface_info_pppol2tpv2 pppol2tpv2; /* type == ECM_DB_IFACE_TYPE_PPPOL2TPV2 */
#endif
#ifdef ECM_INTERFACE_PPTP_ENABLE
struct ecm_db_interface_info_pptp pptp; /* type == ECM_DB_IFACE_TYPE_PPTP */
#endif
#ifdef ECM_INTERFACE_MAP_T_ENABLE
struct ecm_db_interface_info_map_t map_t; /* type == ECM_DB_IFACE_TYPE_MAP_T */
#endif
#ifdef ECM_INTERFACE_GRE_TUN_ENABLE
struct ecm_db_interface_info_gre_tun gre_tun; /* type == ECM_DB_IFACE_TYPE_GRE */
#endif
struct ecm_db_interface_info_unknown unknown; /* type == ECM_DB_IFACE_TYPE_UNKNOWN */
struct ecm_db_interface_info_loopback loopback; /* type == ECM_DB_IFACE_TYPE_LOOPBACK */
#ifdef ECM_INTERFACE_IPSEC_ENABLE
struct ecm_db_interface_info_ipsec_tunnel ipsec_tunnel; /* type == ECM_DB_IFACE_TYPE_IPSEC_TUNNEL */
#endif
#ifdef ECM_INTERFACE_SIT_ENABLE
struct ecm_db_interface_info_sit sit; /* type == ECM_DB_IFACE_TYPE_SIT */
#endif
#ifdef ECM_INTERFACE_TUNIPIP6_ENABLE
#ifdef ECM_IPV6_ENABLE
struct ecm_db_interface_info_tunipip6 tunipip6; /* type == ECM_DB_IFACE_TYPE_TUNIPIP6 */
#endif
#endif
#ifdef ECM_INTERFACE_RAWIP_ENABLE
struct ecm_db_interface_info_rawip rawip; /* type == ECM_DB_IFACE_TYPE_RAWIP */
#endif
#ifdef ECM_INTERFACE_OVPN_ENABLE
struct ecm_db_interface_info_ovpn ovpn; /* type == ECM_DB_IFACE_TYPE_OVPN */
#endif
#ifdef ECM_INTERFACE_VXLAN_ENABLE
struct ecm_db_interface_info_vxlan vxlan; /* type == ECM_DB_IFACE_TYPE_VXLAN */
#endif
#ifdef ECM_INTERFACE_OVS_BRIDGE_ENABLE
struct ecm_db_interface_info_ovs_bridge ovsb; /* type == ECM_DB_IFACE_TYPE_OVS_BRIDGE */
#endif
} type_info;
#ifdef ECM_INTERFACE_GRE_TUN_ENABLE
struct ip_tunnel *gre4_tunnel;
struct ip6_tnl *gre6_tunnel;
#endif
#ifdef ECM_INTERFACE_PPP_ENABLE
int channel_count;
struct ppp_channel *ppp_chan[1];
int channel_protocol;
#ifdef ECM_INTERFACE_PPTP_ENABLE
int protocol = IPPROTO_IP;
struct pptp_opt opt;
struct iphdr *v4_hdr = NULL;
if (skb) {
v4_hdr = ip_hdr(skb);
protocol = v4_hdr->protocol;
}
#endif
#endif
/*
* Get basic information about the given device
*/
dev_interface_num = dev->ifindex;
dev_name = dev->name;
dev_type = dev->type;
dev_mtu = dev->mtu;
/*
* Does the accel engine recognise this interface?
*/
ae_interface_num = feci->ae_interface_number_by_dev_get(dev);
DEBUG_TRACE("%px: Establish interface instance for device: %px is type: %d, name: %s, ifindex: %d, ae_if: %d, mtu: %d\n",
feci, dev, dev_type, dev_name, dev_interface_num, ae_interface_num, dev_mtu);
/*
* Extract from the device more type-specific information
*/
if (dev_type == ARPHRD_ETHER) {
/*
* If MAC address is zeros, do nothing.
*/
if (is_zero_ether_addr(dev->dev_addr)) {
DEBUG_WARN("%px: Net device %px MAC address is all zeros\n", feci, dev);
return NULL;
}
/*
* Ethernet - but what sub type?
*/
#ifdef ECM_INTERFACE_VLAN_ENABLE
/*
* VLAN?
*/
if (is_vlan_dev(dev)) {
/*
* VLAN master
* GGG No locking needed here, ASSUMPTION is that real_dev is held for as long as we have dev.
*/
ether_addr_copy(type_info.vlan.address, dev->dev_addr);
type_info.vlan.vlan_tag = vlan_dev_vlan_id(dev);
type_info.vlan.vlan_tpid = ntohs(vlan_dev_vlan_proto(dev));
DEBUG_TRACE("%px: Net device: %px is VLAN, mac: %pM, vlan_id: %x vlan_tpid: %x\n",
feci, dev, type_info.vlan.address, type_info.vlan.vlan_tag, type_info.vlan.vlan_tpid);
/*
* Establish this type of interface
*/
ii = ecm_interface_vlan_interface_establish(&type_info.vlan, dev_name, dev_interface_num, ae_interface_num, dev_mtu);
goto identifier_update;
}
#endif
#ifdef ECM_INTERFACE_MACVLAN_ENABLE
/*
* MACVLAN?
*/
if (netif_is_macvlan(dev)) {
if (ecm_interface_macvlan_mode_is_valid(dev)) {
ether_addr_copy(type_info.macvlan.address, dev->dev_addr);
DEBUG_TRACE("%px: Net device: %px is MACVLAN, mac: %pM\n",
feci, dev, type_info.macvlan.address);
/*
* Establish this type of interface
*/
ii = ecm_interface_macvlan_interface_establish(&type_info.macvlan, dev_name, dev_interface_num, ae_interface_num, dev_mtu);
goto identifier_update;
}
DEBUG_WARN("%px: Net device %px MACVLAN mode is not supported.\n", feci, dev);
return NULL;
}
#endif
#ifdef ECM_INTERFACE_VXLAN_ENABLE
/*
* VxLAN?
*/
if (netif_is_vxlan(dev)) {
u32 vni;
struct vxlan_dev *vxlan_tun;
ip_addr_t vxlan_saddr, vxlan_daddr;
/*
* VxLAN
*/
vxlan_tun = netdev_priv(dev);
vni = vxlan_get_vni(vxlan_tun);
DEBUG_TRACE("%px: Net device: %px is VxLAN, mac: %pM, vni: %d\n",
feci, dev, dev->dev_addr, vni);
interface_type = ecm_interface_vxlan_type_get(skb);
ae_interface_num = feci->ae_interface_number_by_dev_type_get(dev, interface_type);
DEBUG_TRACE("%px: VxLAN netdevice interface ae_interface_num: %d, interface_type: %d\n",
feci, ae_interface_num, interface_type);
type_info.vxlan.vni = vni;
type_info.vxlan.if_type = interface_type;
/*
* Copy IP addresses from skb
*/
if (ip_hdr(skb)->version == IPVERSION) {
ECM_NIN4_ADDR_TO_IP_ADDR(vxlan_saddr, ip_hdr(skb)->saddr);
ECM_NIN4_ADDR_TO_IP_ADDR(vxlan_daddr, ip_hdr(skb)->daddr);
} else {
ECM_NIN6_ADDR_TO_IP_ADDR(vxlan_saddr, ipv6_hdr(skb)->saddr);
ECM_NIN6_ADDR_TO_IP_ADDR(vxlan_daddr, ipv6_hdr(skb)->daddr);
}
if (ecm_interface_tunnel_mtu_update(vxlan_saddr, vxlan_daddr,
ECM_DB_IFACE_TYPE_VXLAN, &dev_mtu)) {
DEBUG_TRACE("%px: VxLAN netdevice mtu updated: %d\n", feci, dev_mtu);
}
/*
* Establish this type of interface
*/
ii = ecm_interface_vxlan_interface_establish(&type_info.vxlan, dev_name, dev_interface_num, ae_interface_num, dev_mtu);
goto identifier_update;
}
#endif
/*
* BRIDGE?
*/
if (ecm_front_end_is_bridge_device(dev)) {
/*
* Bridge
*/
ether_addr_copy(type_info.bridge.address, dev->dev_addr);
DEBUG_TRACE("%px: Net device: %px is BRIDGE, mac: %pM\n",
feci, dev, type_info.bridge.address);
/*
* Establish this type of interface
*/
ii = ecm_interface_bridge_interface_establish(&type_info.bridge, dev_name, dev_interface_num, ae_interface_num, dev_mtu);
goto identifier_update;
}
/*
* OVS BRIDGE?
*/
#ifdef ECM_INTERFACE_OVS_BRIDGE_ENABLE
if (ovsmgr_is_ovs_master(dev)) {
/*
* OVS Bridge
*/
ether_addr_copy(type_info.ovsb.address, dev->dev_addr);
DEBUG_TRACE("%px: Net device: %px is OVS BRIDGE, mac: %pM\n",
feci, dev, type_info.ovsb.address);
/*
* Establish this type of interface
*/
ii = ecm_interface_ovs_bridge_interface_establish(&type_info.ovsb, dev_name, dev_interface_num, ae_interface_num, dev_mtu);
goto identifier_update;
}
#endif
#ifdef ECM_INTERFACE_BOND_ENABLE
/*
* LAG?
*/
if (ecm_front_end_is_lag_master(dev)) {
/*
* Link aggregation
*/
ether_addr_copy(type_info.lag.address, dev->dev_addr);
DEBUG_TRACE("%px: Net device: %px is LAG, mac: %pM\n",
feci, dev, type_info.lag.address);
/*
* Establish this type of interface
*/
ii = ecm_interface_lag_interface_establish(&type_info.lag, dev_name, dev_interface_num, ae_interface_num, dev_mtu);
goto identifier_update;
}
#endif
#ifdef ECM_INTERFACE_GRE_TAP_ENABLE
/*
* GRE TAP?
*/
if (dev->priv_flags_ext & (IFF_EXT_GRE_V4_TAP | IFF_EXT_GRE_V6_TAP)) {
interface_type = feci->ae_interface_type_get(feci, dev);
ae_interface_num = feci->ae_interface_number_by_dev_type_get(dev, interface_type);
/*
* GRE TAP interface is handled as ethernet interface, however it is possible
* that the acceleration engine may not be ready yet to handle the connection.
* In this case the acceleration engine interface is not found for this type and
* we should wait until it is ready.
*/
if (ae_interface_num < 0) {
DEBUG_TRACE("%px: GRE TAP interface is not ready yet. Interface type: %d\n", feci, interface_type);
return NULL;
}
}
#endif
/*
* ETHERNET!
* Just plain ethernet it seems
*/
ether_addr_copy(type_info.ethernet.address, dev->dev_addr);
DEBUG_TRACE("%px: Net device: %px is ETHERNET, mac: %pM\n",
feci, dev, type_info.ethernet.address);
/*
* Establish this type of interface
*/
ii = ecm_interface_ethernet_interface_establish(&type_info.ethernet, dev_name, dev_interface_num, ae_interface_num, dev_mtu);
identifier_update:
if (ii) {
/*
* An interface identifier/ifindex can be change after network restart. Below
* functtion will check interface_identifier present in 'ii' with new dev_interface_num.
* If differ then update new ifindex and update the interface identifier hash table.
*/
ecm_db_iface_identifier_hash_table_entry_check_and_update(ii, dev_interface_num);
}
return ii;
}
/*
* LOOPBACK?
*/
if (dev_type == ARPHRD_LOOPBACK) {
DEBUG_TRACE("%px: Net device: %px is LOOPBACK type: %d\n", feci, dev, dev_type);
type_info.loopback.os_specific_ident = dev_interface_num;
ii = ecm_interface_loopback_interface_establish(&type_info.loopback, dev_name, dev_interface_num, ae_interface_num, dev_mtu);
return ii;
}
#ifdef ECM_INTERFACE_IPSEC_ENABLE
/*
* IPSEC?
*/
if (dev_type == ECM_ARPHRD_IPSEC_TUNNEL_TYPE) {
#ifdef ECM_INTERFACE_IPSEC_GLUE_LAYER_SUPPORT_ENABLE
struct net_device *ipsec_dev;
ip_addr_t saddr, daddr;
DEBUG_TRACE("Net device: %px is IPSec tunnel type: %d\n", dev, dev_type);
ipsec_dev = ecm_interface_get_and_hold_ipsec_tun_netdev(dev, skb, &interface_type);
if (!ipsec_dev) {
DEBUG_WARN("Failed to find NSS IPSec dev for: %s and type: %d\n", dev->name, dev_type);
return NULL;
}
ae_interface_num = feci->ae_interface_number_by_dev_type_get(ipsec_dev, interface_type);
if (ae_interface_num < 0) {
DEBUG_TRACE("IPSec interface %s is not ready yet\n", ipsec_dev->name);
dev_put(ipsec_dev);
return NULL;
}
dev_put(ipsec_dev);
/*
* Copy IP addresses from skb
*/
if (ip_hdr(skb)->version == IPVERSION) {
ECM_NIN4_ADDR_TO_IP_ADDR(saddr, ip_hdr(skb)->saddr);
ECM_NIN4_ADDR_TO_IP_ADDR(daddr, ip_hdr(skb)->daddr);
} else {
ECM_NIN6_ADDR_TO_IP_ADDR(saddr, ipv6_hdr(skb)->saddr);
ECM_NIN6_ADDR_TO_IP_ADDR(daddr, ipv6_hdr(skb)->daddr);
}
ecm_interface_tunnel_mtu_update(saddr, daddr, ECM_DB_IFACE_TYPE_IPSEC_TUNNEL, &dev_mtu);
#endif
type_info.ipsec_tunnel.os_specific_ident = dev_interface_num;
/*
* Override the MTU size in the decap direction in case of IPSec tunnel.
* This will apply to IPsec->WAN rule.
* TODO: Move this override to accelerate function.
*/
if (ip_hdr(skb)->version == IPVERSION) {
if ((ip_hdr(skb)->protocol == IPPROTO_ESP) ||
((ip_hdr(skb)->protocol == IPPROTO_UDP) &&
(udp_hdr(skb)->dest == htons(4500)))) {
dev_mtu = ECM_DB_IFACE_MTU_MAX;
}
} else {
if (ipv6_hdr(skb)->nexthdr == IPPROTO_ESP) {
dev_mtu = ECM_DB_IFACE_MTU_MAX;
}
}
ii = ecm_interface_ipsec_tunnel_interface_establish(&type_info.ipsec_tunnel, dev_name, dev_interface_num, ae_interface_num, dev_mtu);
if (ii) {
ecm_db_iface_identifier_hash_table_entry_check_and_update(ii, dev_interface_num);
}
return ii;
}
#endif
#ifdef ECM_INTERFACE_MAP_T_ENABLE
if (dev_type == ARPHRD_NONE) {
if (is_map_t_dev(dev)) {
type_info.map_t.if_index = dev_interface_num;
interface_type = feci->ae_interface_type_get(feci, dev);
ae_interface_num = feci->ae_interface_number_by_dev_type_get(dev, interface_type);
if (ae_interface_num < 0) {
DEBUG_TRACE("%px: MAP-T interface is not ready yet\n", feci);
return NULL;
}
ii = ecm_interface_map_t_interface_establish(&type_info.map_t, dev_name, dev_interface_num, ae_interface_num, dev_mtu);
return ii;
}
}
#endif
#ifdef ECM_INTERFACE_SIT_ENABLE
#ifdef CONFIG_IPV6_SIT_6RD
/*
* SIT (6-in-4)?
*/
if (dev_type == ARPHRD_SIT) {
struct ip_tunnel *tunnel;
struct ip_tunnel_6rd_parm *ip6rd;
const struct iphdr *tiph;