// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
*/
#include "testmode.h"
#include <net/netlink.h>
#include "debug.h"
#include "wmi.h"
#include "hw.h"
#include "core.h"
#include "hif.h"
#include "testmode_i.h"
#define FTM_SEGHDR_CURRENT_SEQ GENMASK(3, 0)
#define FTM_SEGHDR_TOTAL_SEGMENTS GENMASK(7, 4)
static const struct nla_policy ath11k_tm_policy[ATH11K_TM_ATTR_MAX + 1] = {
[ATH11K_TM_ATTR_CMD] = { .type = NLA_U32 },
[ATH11K_TM_ATTR_DATA] = { .type = NLA_BINARY,
.len = ATH11K_TM_DATA_MAX_LEN },
[ATH11K_TM_ATTR_WMI_CMDID] = { .type = NLA_U32 },
[ATH11K_TM_ATTR_VERSION_MAJOR] = { .type = NLA_U32 },
[ATH11K_TM_ATTR_VERSION_MINOR] = { .type = NLA_U32 },
[ATH11K_TM_ATTR_FWLOG] = { .type = NLA_BINARY,
.len = ATH11K_FWLOG_MAX_LEN },
};
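/* Forward a firmware log buffer to userspace as a cfg80211 testmode event.
 * The event is sent on the wiphy of the first pdev that has an ath11k
 * instance attached; if no pdev is active the buffer is silently dropped.
 */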
void ath11k_fwlog_write(struct ath11k_base *ab, u8 *data, int len)
{
struct sk_buff *nl_skb;
int ret, i;
struct ath11k *ar = NULL;
struct ath11k_pdev *pdev;
for (i = 0; i < ab->num_radios; i++) {
pdev = &ab->pdevs[i];
if (pdev && pdev->ar) {
ar = pdev->ar;
break;
}
}
if (!ar)
return;
nl_skb = cfg80211_testmode_alloc_event_skb(ar->hw->wiphy,
len, GFP_ATOMIC);
if (!nl_skb) {
ath11k_warn(ab,
"failed to allocate skb for fwlog event\n");
return;
}
ret = nla_put(nl_skb, ATH11K_TM_ATTR_FWLOG, len, data);
if (ret) {
ath11k_warn(ab,
"failed to to put fwlog wmi event to nl: %d\n",
ret);
kfree_skb(nl_skb);
return;
}
cfg80211_testmode_event(nl_skb, GFP_ATOMIC, true);
}
/* Returns true if the callee consumes the skb and the skb should be discarded.
 * Returns false if the skb is not used. Does not sleep.
 * Unsegmented events are handled here; segments are aggregated in the
 * application layer.
 */
bool ath11k_wmi_tm_event_unsegmented(struct ath11k_base *ab, u32 cmd_id,
struct sk_buff *skb)
{
struct sk_buff *nl_skb;
struct ath11k *ar;
bool consumed;
int ret, i;
struct ath11k_pdev *pdev;
ath11k_dbg(ab, ATH11K_DBG_TESTMODE,
"testmode event wmi cmd_id %d skb %pK skb->len %d\n",
cmd_id, skb, skb->len);
ath11k_dbg_dump(ab, ATH11K_DBG_TESTMODE, NULL, "", skb->data, skb->len);
for (i = 0; i < ab->num_radios; i++) {
pdev = &ab->pdevs[i];
ar = pdev->ar;
if (ar) {
if (ar->state == ATH11K_STATE_TM)
break;
}
}
if (i >= ab->num_radios) {
ath11k_warn(ab, "testmode event not handled due to invalid pdev\n");
return false;
}
spin_lock_bh(&ar->data_lock);
/* Only testmode.c should be handling events from utf firmware,
* otherwise all sort of problems will arise as mac80211 operations
* are not initialised.
*/
consumed = true;
nl_skb = cfg80211_testmode_alloc_event_skb(ar->hw->wiphy,
2 * sizeof(u32) + skb->len,
GFP_ATOMIC);
if (!nl_skb) {
ath11k_warn(ab,
"failed to allocate skb for testmode wmi event\n");
goto out;
}
ret = nla_put_u32(nl_skb, ATH11K_TM_ATTR_CMD, ATH11K_TM_CMD_WMI);
if (ret) {
ath11k_warn(ab,
"failed to put testmode wmi event cmd attribute: %d\n",
ret);
kfree_skb(nl_skb);
goto out;
}
ret = nla_put_u32(nl_skb, ATH11K_TM_ATTR_WMI_CMDID, cmd_id);
if (ret) {
ath11k_warn(ab,
"failed to put testmode wmi event cmd_id: %d\n",
ret);
kfree_skb(nl_skb);
goto out;
}
ret = nla_put(nl_skb, ATH11K_TM_ATTR_DATA, skb->len, skb->data);
if (ret) {
ath11k_warn(ab,
"failed to copy skb to testmode wmi event: %d\n",
ret);
kfree_skb(nl_skb);
goto out;
}
cfg80211_testmode_event(nl_skb, GFP_ATOMIC, false);
out:
spin_unlock_bh(&ar->data_lock);
return consumed;
}
/* Returns true if the callee consumes the skb and the skb should be discarded.
 * Returns false if the skb is not used. Does not sleep.
 * Segmented events are handled here: the data from the individual firmware
 * events is aggregated and the complete buffer is sent to the application
 * layer.
 */
bool ath11k_process_tm_event(struct ath11k_base *ab, u32 cmd_id,
const struct wmi_ftm_event_msg *ftm_msg,
u16 length)
{
struct sk_buff *nl_skb;
bool consumed;
int ret;
struct ath11k *ar;
u8 *buf_pos;
u16 datalen;
u8 total_segments, current_seq;
u32 data_pos;
u32 pdev_id;
ath11k_dbg(ab, ATH11K_DBG_TESTMODE,
"testmode event wmi cmd_id %d ftm event msg %pK datalen %d\n",
cmd_id, ftm_msg, length);
ath11k_dbg_dump(ab, ATH11K_DBG_TESTMODE, NULL, "", ftm_msg, length);
pdev_id = DP_HW2SW_MACID(ftm_msg->seg_hdr.pdev_id);
if (pdev_id >= ab->num_radios) {
ath11k_warn(ab, "testmode event not handled due to invalid pdev id\n");
return false;
}
ar = ab->pdevs[pdev_id].ar;
if (!ar) {
ath11k_warn(ab, "testmode event not handled due to absence of pdev\n");
return false;
}
if (ar->state != ATH11K_STATE_TM) {
ath11k_warn(ab, "testmode event not handled due to state error\n");
return false;
}
spin_lock_bh(&ar->data_lock);
consumed = true;
current_seq = FIELD_GET(FTM_SEGHDR_CURRENT_SEQ,
ftm_msg->seg_hdr.segmentinfo);
total_segments = FIELD_GET(FTM_SEGHDR_TOTAL_SEGMENTS,
ftm_msg->seg_hdr.segmentinfo);
datalen = length - (sizeof(struct wmi_ftm_seg_hdr));
buf_pos = (u8 *)ftm_msg->data;
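/* A sequence number of zero marks the start of a new event; reset the
 * reassembly state before copying in the first segment.
 */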
if (current_seq == 0) {
ab->ftm_event_obj.expected_seq = 0;
ab->ftm_event_obj.data_pos = 0;
}
data_pos = ab->ftm_event_obj.data_pos;
if ((data_pos + datalen) > ATH11K_FTM_EVENT_MAX_BUF_LENGTH) {
ath11k_warn(ab,
"Invalid event length date_pos[%d] datalen[%d]\n",
data_pos, datalen);
goto out;
}
memcpy(&ab->ftm_event_obj.eventdata[data_pos], buf_pos, datalen);
data_pos += datalen;
if (++ab->ftm_event_obj.expected_seq != total_segments) {
ab->ftm_event_obj.data_pos = data_pos;
ath11k_dbg(ab, ATH11K_DBG_TESTMODE,
"partial data received current_seq[%d], total_seg[%d]\n",
current_seq, total_segments);
goto out;
}
ath11k_dbg(ab, ATH11K_DBG_TESTMODE,
"total data length[%d] = [%d]\n",
data_pos, ftm_msg->seg_hdr.len);
nl_skb = cfg80211_testmode_alloc_event_skb(ar->hw->wiphy,
2 * sizeof(u32) + data_pos,
GFP_ATOMIC);
if (!nl_skb) {
ath11k_warn(ab,
"failed to allocate skb for testmode wmi event\n");
goto out;
}
ret = nla_put_u32(nl_skb, ATH11K_TM_ATTR_CMD,
ATH11K_TM_CMD_WMI_FTM);
if (ret) {
ath11k_warn(ab,
"failed to put testmode wmi event cmd attribute: %d\n",
ret);
kfree_skb(nl_skb);
goto out;
}
ret = nla_put_u32(nl_skb, ATH11K_TM_ATTR_WMI_CMDID, cmd_id);
if (ret) {
ath11k_warn(ab,
"failed to put testmode wmi even cmd_id: %d\n",
ret);
kfree_skb(nl_skb);
goto out;
}
ret = nla_put(nl_skb, ATH11K_TM_ATTR_DATA, data_pos,
&ab->ftm_event_obj.eventdata[0]);
if (ret) {
ath11k_warn(ab,
"failed to copy skb to testmode wmi event: %d\n",
ret);
kfree_skb(nl_skb);
goto out;
}
cfg80211_testmode_event(nl_skb, GFP_ATOMIC, false);
out:
spin_unlock_bh(&ar->data_lock);
return consumed;
}
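/* Report the testmode interface version (major/minor) back to userspace
 * in a cfg80211 testmode reply.
 */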
static int ath11k_tm_cmd_get_version(struct ath11k *ar, struct nlattr *tb[])
{
struct sk_buff *skb;
int ret;
ath11k_dbg(ar->ab, ATH11K_DBG_TESTMODE,
"testmode cmd get version_major %d version_minor %d\n",
ATH11K_TESTMODE_VERSION_MAJOR,
ATH11K_TESTMODE_VERSION_MINOR);
skb = cfg80211_testmode_alloc_reply_skb(ar->hw->wiphy,
nla_total_size(sizeof(u32)));
if (!skb)
return -ENOMEM;
ret = nla_put_u32(skb, ATH11K_TM_ATTR_VERSION_MAJOR,
ATH11K_TESTMODE_VERSION_MAJOR);
if (ret) {
kfree_skb(skb);
return ret;
}
ret = nla_put_u32(skb, ATH11K_TM_ATTR_VERSION_MINOR,
ATH11K_TESTMODE_VERSION_MINOR);
if (ret) {
kfree_skb(skb);
return ret;
}
return cfg80211_testmode_reply(skb);
}
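/* Switch the radio into testmode so that subsequent FTM commands can be
 * passed to the UTF firmware. Only allowed while the interface is down;
 * also allocates the buffer used to reassemble segmented FTM events.
 */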
static int ath11k_tm_cmd_testmode_start(struct ath11k *ar, struct nlattr *tb[])
{
int ret;
ath11k_dbg(ar->ab, ATH11K_DBG_TESTMODE, " enter testmode cmd fw start\n");
mutex_lock(&ar->conf_mutex);
if (ar->state == ATH11K_STATE_TM) {
ret = -EALREADY;
goto err;
}
/* start utf only when the driver is not in use */
if (ar->state != ATH11K_STATE_OFF) {
ret = -EBUSY;
goto err;
}
ar->ab->ftm_event_obj.eventdata =
kzalloc(ATH11K_FTM_EVENT_MAX_BUF_LENGTH, GFP_KERNEL);
if (!ar->ab->ftm_event_obj.eventdata) {
ret = -ENOMEM;
goto err;
}
ar->state = ATH11K_STATE_TM;
ar->ftm_msgref = 0;
mutex_unlock(&ar->conf_mutex);
ath11k_dbg(ar->ab, ATH11K_DBG_TESTMODE, " enter testmode cmd started\n");
return 0;
err:
mutex_unlock(&ar->conf_mutex);
return ret;
}
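/* Send a WMI command provided by userspace directly to the firmware.
 * For pdev/vdev set-param and unit-test commands the first TLV word after
 * the header is overwritten with the current pdev id or vdev id so that
 * the command is applied to the right instance.
 */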
static int ath11k_tm_cmd_wmi(struct ath11k *ar, struct nlattr *tb[],
struct ieee80211_vif *vif)
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct sk_buff *skb;
struct ath11k_vif *arvif;
u32 cmd_id, buf_len;
int ret, tag;
void *buf;
u32 *ptr;
mutex_lock(&ar->conf_mutex);
if (!tb[ATH11K_TM_ATTR_DATA]) {
ret = -EINVAL;
goto out;
}
if (!tb[ATH11K_TM_ATTR_WMI_CMDID]) {
ret = -EINVAL;
goto out;
}
buf = nla_data(tb[ATH11K_TM_ATTR_DATA]);
buf_len = nla_len(tb[ATH11K_TM_ATTR_DATA]);
cmd_id = nla_get_u32(tb[ATH11K_TM_ATTR_WMI_CMDID]);
ptr = (u32 *)buf;
tag = FIELD_GET(WMI_TLV_TAG, *ptr);
ptr++;
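/* ptr now points at the first word of the TLV payload, which holds the
 * pdev id or vdev id for the tags handled below.
 */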
if (tag == WMI_TAG_PDEV_SET_PARAM_CMD)
*ptr = ar->pdev->pdev_id;
if (ar->ab->fw_mode != ATH11K_FIRMWARE_MODE_FTM &&
(tag == WMI_TAG_VDEV_SET_PARAM_CMD || tag == WMI_TAG_UNIT_TEST_CMD)) {
if (vif) {
arvif = (struct ath11k_vif *)vif->drv_priv;
*ptr = arvif->vdev_id;
} else {
ret = -EINVAL;
ath11k_warn(ar->ab, "vdev is not up for given vdev id, so failed to send wmi command (testmode): %d\n",
ret);
goto out;
}
}
ath11k_dbg(ar->ab, ATH11K_DBG_TESTMODE,
"testmode cmd wmi cmd_id %d buf %pK buf_len %d\n",
cmd_id, buf, buf_len);
ath11k_dbg_dump(ar->ab, ATH11K_DBG_TESTMODE, NULL, "", buf, buf_len);
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, buf_len);
if (!skb) {
ret = -ENOMEM;
goto out;
}
memcpy(skb->data, buf, buf_len);
ret = ath11k_wmi_cmd_send(wmi, skb, cmd_id);
if (ret) {
dev_kfree_skb(skb);
ath11k_warn(ar->ab, "failed to transmit wmi command (testmode): %d\n",
ret);
goto out;
}
ret = 0;
out:
mutex_unlock(&ar->conf_mutex);
return ret;
}
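/* Send an FTM command buffer to the UTF firmware. Buffers larger than
 * MAX_WMI_UTF_LEN are split into segments; each segment is prefixed with
 * a wmi_ftm_seg_hdr carrying the total length, a message reference and
 * the segment sequence/count, and sent as a WMI_PDEV_UTF_CMDID command.
 */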
static int ath11k_tm_cmd_process_ftm(struct ath11k *ar, struct nlattr *tb[])
{
struct ath11k_pdev_wmi *wmi = ar->wmi;
struct sk_buff *skb;
u32 cmd_id, buf_len, hdr_info;
int ret;
void *buf;
/* if buf_len is 0 no data is sent, return error */
u8 segnumber = 0, seginfo;
u16 chunk_len, total_bytes, num_segments;
u8 *bufpos;
struct wmi_ftm_cmd *ftm_cmd;
mutex_lock(&ar->conf_mutex);
ath11k_dbg(ar->ab, ATH11K_DBG_TESTMODE, "ar->state %d\n", ar->state);
if (ar->state != ATH11K_STATE_TM) {
ret = -ENETDOWN;
goto out;
}
if (!tb[ATH11K_TM_ATTR_DATA]) {
ret = -EINVAL;
goto out;
}
buf = nla_data(tb[ATH11K_TM_ATTR_DATA]);
buf_len = nla_len(tb[ATH11K_TM_ATTR_DATA]);
cmd_id = WMI_PDEV_UTF_CMDID;
ath11k_dbg(ar->ab, ATH11K_DBG_TESTMODE,
"testmode cmd wmi cmd_id %d buf %pK buf_len %d\n",
cmd_id, buf, buf_len);
ath11k_dbg_dump(ar->ab, ATH11K_DBG_TESTMODE, NULL, "", buf, buf_len);
bufpos = buf;
total_bytes = buf_len;
num_segments = total_bytes / MAX_WMI_UTF_LEN;
if (buf_len - (num_segments * MAX_WMI_UTF_LEN))
num_segments++;
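/* Send the buffer as a series of UTF segments, at most MAX_WMI_UTF_LEN
 * bytes of payload per WMI command.
 */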
while (buf_len) {
if (buf_len > MAX_WMI_UTF_LEN)
chunk_len = MAX_WMI_UTF_LEN; /* MAX message */
else
chunk_len = buf_len;
skb = ath11k_wmi_alloc_skb(wmi->wmi_ab, (chunk_len +
sizeof(struct wmi_ftm_cmd)));
if (!skb) {
ret = -ENOMEM;
goto out;
}
ftm_cmd = (struct wmi_ftm_cmd *)skb->data;
hdr_info = FIELD_PREP(WMI_TLV_TAG, WMI_TAG_ARRAY_BYTE) |
FIELD_PREP(WMI_TLV_LEN, (chunk_len +
sizeof(struct wmi_ftm_seg_hdr)));
ftm_cmd->tlv_header = hdr_info;
ftm_cmd->seg_hdr.len = total_bytes;
ftm_cmd->seg_hdr.msgref = ar->ftm_msgref;
seginfo = FIELD_PREP(FTM_SEGHDR_TOTAL_SEGMENTS, num_segments) |
FIELD_PREP(FTM_SEGHDR_CURRENT_SEQ, segnumber);
ftm_cmd->seg_hdr.segmentinfo = seginfo;
segnumber++;
memcpy(&ftm_cmd->data, bufpos, chunk_len);
ret = ath11k_wmi_cmd_send(wmi, skb, cmd_id);
if (ret) {
ath11k_warn(ar->ab, "ftm wmi command fail: %d\n", ret);
goto out;
}
buf_len -= chunk_len;
bufpos += chunk_len;
}
++ar->ftm_msgref;
ret = 0;
out:
mutex_unlock(&ar->conf_mutex);
return ret;
}
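/* mac80211 testmode entry point: parse the netlink attributes and dispatch
 * to the handler selected by ATH11K_TM_ATTR_CMD.
 */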
int ath11k_tm_cmd(struct ieee80211_hw *hw, struct ieee80211_vif *vif,
void *data, int len)
{
struct ath11k *ar = hw->priv;
struct ath11k_base *ab = ar->ab;
struct nlattr *tb[ATH11K_TM_ATTR_MAX + 1];
int ret;
ret = nla_parse(tb, ATH11K_TM_ATTR_MAX, data, len, ath11k_tm_policy,
NULL);
if (ret)
return ret;
if (!tb[ATH11K_TM_ATTR_CMD])
return -EINVAL;
switch (nla_get_u32(tb[ATH11K_TM_ATTR_CMD])) {
case ATH11K_TM_CMD_WMI:
ab->ftm_segment_handler = 0;
return ath11k_tm_cmd_wmi(ar, tb, vif);
case ATH11K_TM_CMD_TESTMODE_START:
return ath11k_tm_cmd_testmode_start(ar, tb);
case ATH11K_TM_CMD_GET_VERSION:
return ath11k_tm_cmd_get_version(ar, tb);
case ATH11K_TM_CMD_WMI_FTM:
ab->ftm_segment_handler = 1;
return ath11k_tm_cmd_process_ftm(ar, tb);
case ATH11K_TM_CMD_TESTMODE_STOP:
return 0;
default:
return -EOPNOTSUPP;
}
}