/*
* Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
*
* Permission to use, copy, modify, and/or distribute this software for any
* purpose with or without fee is hereby granted, provided that the above
* copyright notice and this permission notice appear in all copies.
*
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
*/
#include <linux/version.h>
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <crypto/aes.h>
#include <crypto/gcm.h>
#include <crypto/algapi.h>
#include "sfe_xfrm.h"
static uint16_t ip4_id = 1; /* IPv4 header Identifier */
/*
* sfe_xfrm_add_ipv4()
* Add IPv4 header.
*/
static inline void sfe_xfrm_add_ipv4(struct sfe_xfrm_sa *sa, struct sk_buff *skb, uint8_t ip_proto)
{
struct sfe_xfrm_sa_hdr *hdr = &sa->hdr;
struct iphdr *iph;
iph = skb_push(skb, sizeof(struct iphdr));
skb_reset_network_header(skb);
iph->version = IPVERSION;
iph->ihl = sizeof(struct iphdr) >> 2;
iph->tos = 0;
iph->tot_len = htons(skb->len);
iph->id = htons(ip4_id++);
iph->frag_off = 0;
iph->ttl = IPDEFTTL;
iph->protocol = ip_proto;
iph->saddr = hdr->src_ip[0];
iph->daddr = hdr->dst_ip[0];
skb->ip_summed = CHECKSUM_NONE;
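/*
* Compute the IPv4 header checksum over ihl 32-bit words.
*/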
iph->check = 0;
iph->check = ip_fast_csum(iph, iph->ihl);
}
/*
* sfe_xfrm_add_udp()
* Add UDP header.
*/
static inline void sfe_xfrm_add_udp(struct sfe_xfrm_sa *sa, struct sk_buff *skb)
{
struct sfe_xfrm_sa_hdr *hdr = &sa->hdr;
struct udphdr *uh;
uh = __skb_push(skb, sizeof(struct udphdr));
skb_reset_transport_header(skb);
uh->dest = hdr->dport;
uh->source = hdr->sport;
uh->len = htons(skb->len);
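/*
* The UDP checksum is left zero, as permitted for UDP-encapsulated ESP (RFC 3948).
*/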
uh->check = 0;
}
/*
* sfe_xfrm_add_esp()
* Add ESP header and trailer. This is specifically optimized for GCM.
*/
static inline void sfe_xfrm_add_esp(struct sfe_xfrm_sa *sa, struct sk_buff *skb, uint8_t proto)
{
struct sfe_xfrm_sa_state_enc *enc = &sa->state.enc;
struct sfe_xfrm_sa_hdr *hdr = &sa->hdr;
struct ip_esp_trailer *trailer;
struct ip_esp_hdr *esph;
uint16_t pad_len;
uint16_t blk_len;
uint8_t *pad;
uint8_t i;
/*
* Add ESP header & IV.
* IV will be filled in encrypt_auth()
*/
esph = (struct ip_esp_hdr *)__skb_push(skb, sizeof(*esph) + enc->iv_len);
esph->spi = hdr->spi;
esph->seq_no = htonl(enc->esp_seq++);
/*
* Compute the pad length so that the payload plus the ESP trailer
* is a multiple of the cipher block length.
*/
blk_len = enc->blk_len;
pad_len = ALIGN(skb->len + sizeof(*trailer), blk_len) - (skb->len + sizeof(*trailer));
/*
* Add ESP trailer and ICV.
* ICV will be filled during encrypt_auth().
*/
pad = __skb_put(skb, pad_len + sizeof(*trailer) + enc->icv_len);
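/*
* ESP padding bytes are the monotonically increasing sequence 1, 2, 3, ...
* as required by RFC 4303.
*/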
for (i = 1; i <= pad_len; i++) {
*pad++ = i;
}
trailer = (struct ip_esp_trailer *)pad;
trailer->pad_len = pad_len;
trailer->next_hdr = proto;
}
/*
* sfe_xfrm_ip4_send()
* Transmit the encapsulated packet out.
*/
void sfe_xfrm_ip4_send(struct sfe_xfrm_sa *sa, struct sk_buff *skb)
{
struct sfe_xfrm_sa_state_enc *enc = &sa->state.enc;
struct sfe_xfrm_dev_stats *dev_stats;
struct sfe_xfrm_sa_stats *sa_stats;
struct dst_entry *dst;
struct rtable *rt;
dev_stats = this_cpu_ptr(sa->dev->stats_pcpu);
sa_stats = this_cpu_ptr(sa->stats_pcpu);
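/*
* Try the per-SA cached route first.
*/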
dst = dst_cache_get(&enc->dst_cache);
if (likely(dst)) {
goto send_buf;
}
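/*
* Cache miss; resolve the IPv4 route and refresh the dst cache.
*/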
rt = ip_route_output(&init_net, ip_hdr(skb)->daddr, ip_hdr(skb)->saddr, 0, 0);
if (IS_ERR(rt)) {
sa_stats->fail_route++;
dev_stats->tx_fail++;
dev_kfree_skb_any(skb);
return;
}
dst = &rt->dst;
sa_stats->fail_dst_cache++;
dst_cache_set_ip4(&enc->dst_cache, dst, ip_hdr(skb)->saddr);
send_buf:
/*
* Drop existing dst and set new.
*/
skb_scrub_packet(skb, false);
skb_dst_set(skb, dst);
/*
* Reset General SKB fields for further processing.
*/
skb->protocol = htons(ETH_P_IP);
skb->skb_iif = sa->ifindex;
skb->ip_summed = CHECKSUM_COMPLETE;
memset(IPCB(skb), 0, sizeof(*IPCB(skb)));
IPCB(skb)->flags |= IPSKB_XFRM_TRANSFORMED;
/*
* Send packet out and update Tx statistics.
*/
sa_stats->tx_pkts++;
sa_stats->tx_bytes += skb->len;
dev_stats->tx_pkts++;
dev_stats->tx_bytes += skb->len;
ip_local_out(&init_net, NULL, skb);
}
/*
* sfe_xfrm_encrypt_auth_gcm()
* Encrypt and authenticate the SKB. skb->data must point to the ESP header and the payload must be block aligned.
*/
void sfe_xfrm_encrypt_auth_gcm(struct sfe_xfrm_sa *sa, struct sk_buff *skb)
{
struct sfe_xfrm_sa_state_enc *enc = &sa->state.enc;
struct crypto_sync_skcipher *etfm = sa->enc_tfm;
struct crypto_shash *atfm = sa->auth_tfm;
uint8_t zero_enc[AES_BLOCK_SIZE] = {0};
uint8_t *esph, *data, *pkt_hmac;
uint32_t iv[4], *pkt_iv;
uint32_t data_len;
int ret;
esph = skb->data + enc->esp_offset;
pkt_iv = (uint32_t *)(esph + sizeof(struct ip_esp_hdr));
pkt_hmac = skb_tail_pointer(skb) - enc->icv_len;
/*
* Generate IV for encryption.
*/
iv[0] = enc->nonce; /* Nonce */
iv[1] = enc->iv_seq[0] ^ enc->salt[0]; /* Explicit IV 0 */
iv[2] = enc->iv_seq[1] ^ enc->salt[1]; /* Explicit IV 1 */
iv[3] = htonl(0x1); /* CTR counter start value */
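/*
* Advance the 64-bit explicit IV sequence for the next packet.
*/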
(*(uint64_t *)&enc->iv_seq)++;
/*
* Copy explicit IV to packet.
*/
*pkt_iv++ = iv[1];
*pkt_iv++ = iv[2];
/*
* Set cipher data start and length.
*/
data = (uint8_t *)pkt_iv;
data_len = pkt_hmac - data;
/*
* Encrypt the data in CTR mode.
* A zero block is prepended to the scatterlist so that the first keystream
* block, E(K, J0), is captured in zero_enc; it is used below to complete
* the GCM ICV.
* The do-while block limits stack utilization for the ON_STACK request.
*/
do {
struct scatterlist sg[2];
/*
* TODO: Allocate on heap
*/
SYNC_SKCIPHER_REQUEST_ON_STACK(ereq, etfm);
sg_init_table(sg, 2);
sg_set_buf(&sg[0], zero_enc, sizeof(zero_enc));
sg_set_buf(&sg[1], data, data_len);
skcipher_request_set_sync_tfm(ereq, etfm);
skcipher_request_set_callback(ereq, 0, NULL, NULL);
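/*
* Encrypt in place; the same scatterlist is used as source and destination.
*/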
skcipher_request_set_crypt(ereq, sg, sg, data_len + sizeof(zero_enc), iv);
ret = crypto_skcipher_encrypt(ereq);
BUG_ON(ret);
} while (0);
/*
* Generate the GHASH authentication tag over the encrypted data.
* The do-while block limits stack utilization for the ON_STACK descriptor.
*/
do {
uint8_t zero_pad[GHASH_BLOCK_SIZE] = {0};
uint8_t unaligned_len;
be128 final_blk;
/*
* TODO: Allocate this on heap
*/
SHASH_DESC_ON_STACK(areq, atfm);
areq->tfm = atfm;
ret = crypto_shash_init(areq);
BUG_ON(ret);
/*
* Authenticate the ESP header (SPI + sequence number); this forms the GCM AAD.
*/
crypto_shash_update(areq, esph, sizeof(struct ip_esp_hdr));
/*
* Authenticate a fixed 8-byte zero pad so that the 8-byte AAD fills a
* full GHASH block.
*/
crypto_shash_update(areq, zero_pad, 8);
/*
* Authenticate the encrypted payload, excluding the explicit IV.
*/
crypto_shash_update(areq, data, data_len);
/*
* If the ciphertext length is not a multiple of the GHASH block size,
* authenticate additional zero pad bytes.
*/
unaligned_len = data_len & (GHASH_BLOCK_SIZE - 1);
if (unaligned_len) {
crypto_shash_update(areq, zero_pad, GHASH_BLOCK_SIZE - unaligned_len);
}
/*
* The final GHASH block carries the AAD and ciphertext lengths in bits.
* Generate the hash directly in the SKB tail.
* We may write more than icv_len bytes, but that is fine as there is
* enough tailroom.
*/
final_blk.a = cpu_to_be64(sizeof(struct ip_esp_hdr) * 8);
final_blk.b = cpu_to_be64(data_len * 8);
ret = crypto_shash_finup(areq, (uint8_t *)&final_blk, sizeof(final_blk), pkt_hmac);
BUG_ON(ret);
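/*
* Complete the GCM ICV: XOR the GHASH output with E(K, J0), which was
* captured in zero_enc during encryption.
*/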
crypto_xor(pkt_hmac, zero_enc, GHASH_BLOCK_SIZE);
} while (0);
}
/*
* sfe_xfrm_add_hdr_natt()
* Add IPv4 encapsulation headers for NAT-T (UDP-encapsulated ESP).
*/
void sfe_xfrm_add_hdr_natt(struct sfe_xfrm_sa *sa, struct sk_buff *skb)
{
/*
* Insert ESP, UDP & IP header.
*/
sfe_xfrm_add_esp(sa, skb, IPPROTO_IPIP);
sfe_xfrm_add_udp(sa, skb);
sfe_xfrm_add_ipv4(sa, skb, IPPROTO_UDP);
}
/*
* sfe_xfrm_add_hdr_v4()
* Add IPv4 encapsulation headers.
*/
void sfe_xfrm_add_hdr_v4(struct sfe_xfrm_sa *sa, struct sk_buff *skb)
{
/*
* Insert ESP & IP header.
*/
sfe_xfrm_add_esp(sa, skb, IPPROTO_IPIP);
sfe_xfrm_add_ipv4(sa, skb, IPPROTO_ESP);
}
/*
* sfe_xfrm_add_hdr_ip6()
* Add IPv6 encapsulation headers (not implemented).
*/
void sfe_xfrm_add_hdr_ip6(struct sfe_xfrm_sa *sa, struct sk_buff *skb)
{
pr_err("%p: Not implemented\n", sa);
BUG_ON(1);
}
/*
* sfe_xfrm_enc()
* Encapsulate the plaintext packet.
*/
netdev_tx_t sfe_xfrm_enc(struct sk_buff *skb, struct net_device *ndev)
{
struct sfe_xfrm_dev *dev = netdev_priv(ndev);
bool nonlinear = skb_is_nonlinear(skb);
struct sfe_xfrm_dev_stats *dev_stats;
struct sfe_xfrm_sa_stats *sa_stats;
struct sfe_xfrm_sa_state_enc *enc;
struct sfe_xfrm_sa *sa;
dev_stats = this_cpu_ptr(dev->stats_pcpu);
/*
* Shared SKBs cannot be modified in place; drop them.
*/
if (unlikely(skb_shared(skb))) {
dev_stats->tx_fail_shared++;
goto drop;
}
/*
* Linearize the nonlinear SKB.
* TODO: add support for SG.
*/
if (nonlinear && __skb_linearize(skb)) {
pr_debug("%px: Failed to linearize the SKB\n", ndev);
dev_stats->tx_fail_linearize++;
goto drop;
}
dev_stats->tx_linearize += nonlinear;
/*
* The first SA at the head of the device's encapsulation list is always selected.
*/
rcu_read_lock_bh();
sa = rcu_dereference(dev->sa);
if (unlikely(!sa)) {
pr_debug("%px: Failed to find a valid SA for encapsulation\n", ndev);
dev_stats->tx_fail_sa++;
goto fail;
}
/*
* Packets with insufficient headroom or tailroom will be dropped.
*/
enc = &sa->state.enc;
if ((skb_headroom(skb) < enc->head_room) || (skb_tailroom(skb) < enc->tail_room)) {
pr_debug("%px: dropping SKB(%p): hroom(%u) or troom(%u)\n", ndev, skb, skb_headroom(skb), skb_tailroom(skb));
dev_stats->tx_fail_hroom += (skb_headroom(skb) < enc->head_room);
dev_stats->tx_fail_troom += (skb_tailroom(skb) < enc->tail_room);
goto fail;
}
sa_stats = this_cpu_ptr(sa->stats_pcpu);
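/*
* Drop the packet once the 32-bit ESP sequence number wraps to zero,
* to avoid sequence number reuse.
*/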
if (unlikely(!enc->esp_seq)) {
sa_stats->fail_seq++;
pr_debug("%px: ESP Sequence overflowed SPI(0x%X)", skb, htonl(sa->hdr.spi));
goto fail;
}
/*
* Update Rx statistics.
*/
sa_stats->rx_pkts++;
sa_stats->rx_bytes += skb->len;
/*
* The following operations are performed:
* 1. Add ESP header to the packet
* 2. Encrypt payload and authenticate
* 3. Add IP headers & transmit
*/
enc->add_hdr(sa, skb);
enc->encrypt_auth(sa, skb);
enc->ip_send(sa, skb);
rcu_read_unlock_bh();
return NETDEV_TX_OK;
fail:
rcu_read_unlock_bh();
drop:
dev_kfree_skb_any(skb);
dev_stats->tx_fail++;
return NETDEV_TX_OK;
}