// SPDX-License-Identifier: BSD-3-Clause-Clear
/*
* Copyright (c) 2018-2019 The Linux Foundation. All rights reserved.
* Copyright (c) 2022, Qualcomm Innovation Center, Inc. All rights reserved.
*/
#include "core.h"
#include "debug.h"
/* World regdom to be used in case default regd from fw is unavailable */
#define ATH11K_2GHZ_CH01_11 REG_RULE(2412 - 10, 2462 + 10, 40, 0, 20, 0)
#define ATH11K_5GHZ_5150_5350 REG_RULE(5150 - 10, 5350 + 10, 80, 0, 30,\
NL80211_RRF_NO_IR)
#define ATH11K_5GHZ_5725_5850 REG_RULE(5725 - 10, 5850 + 10, 80, 0, 30,\
NL80211_RRF_NO_IR)
#define ETSI_WEATHER_RADAR_BAND_LOW 5590
#define ETSI_WEATHER_RADAR_BAND_HIGH 5650
#define ETSI_WEATHER_RADAR_BAND_CAC_TIMEOUT 600000
static const struct ieee80211_regdomain ath11k_world_regd = {
.n_reg_rules = 3,
.alpha2 = "00",
.reg_rules = {
ATH11K_2GHZ_CH01_11,
ATH11K_5GHZ_5150_5350,
ATH11K_5GHZ_5725_5850,
}
};
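/* Return true if the requested alpha2 differs from the regdomain
* currently set on the wiphy (or if no regd has been set yet)
*/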
static bool ath11k_regdom_changes(struct ath11k *ar, char *alpha2)
{
const struct ieee80211_regdomain *regd;
regd = rcu_dereference_rtnl(ar->hw->wiphy->regd);
/* This can happen during wiphy registration where the previous
* user request is received before we update the regd received
* from firmware.
*/
if (!regd)
return true;
return memcmp(regd->alpha2, alpha2, 2) != 0;
}
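/* Return false if any active pdev has a started vdev; the country
* code must not be changed while an interface is up
*/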
static bool ath11k_reg_validate_pdev_state(struct ath11k *ar)
{
struct ath11k_base *ab = ar->ab;
struct ath11k_pdev *pdev;
struct ath11k *tmp_ar;
int i;
rcu_read_lock();
for (i = 0; i < ab->num_radios; i++) {
pdev = rcu_dereference(ab->pdevs_active[i]);
if (!pdev)
continue;
tmp_ar = pdev->ar;
if (tmp_ar) {
mutex_lock(&tmp_ar->conf_mutex);
if (tmp_ar->num_started_vdevs) {
if (tmp_ar == ar)
ath11k_warn(ab, "%s has an active interface, please bring it down to set the country code",
wiphy_name(ar->hw->wiphy));
mutex_unlock(&tmp_ar->conf_mutex);
rcu_read_unlock();
return false;
}
mutex_unlock(&tmp_ar->conf_mutex);
}
}
rcu_read_unlock();
return true;
}
static void
ath11k_reg_notifier(struct wiphy *wiphy, struct regulatory_request *request)
{
struct ieee80211_hw *hw = wiphy_to_ieee80211_hw(wiphy);
struct wmi_init_country_params init_country_param;
struct ath11k *ar = hw->priv;
int ret;
ath11k_dbg(ar->ab, ATH11K_DBG_REG,
"Regulatory Notification received for %s\n", wiphy_name(wiphy));
/* Currently supporting only general user hints. Cell-based user
* hints are to be handled later.
* Hints from other sources like core and beacons are not expected
* for self-managed wiphys.
*/
if (!(request->initiator == NL80211_REGDOM_SET_BY_USER &&
request->user_reg_hint_type == NL80211_USER_REG_HINT_USER)) {
ath11k_warn(ar->ab, "Unexpected Regulatory event for this wiphy\n");
return;
}
if (!IS_ENABLED(CONFIG_ATH_REG_DYNAMIC_USER_REG_HINTS)) {
ath11k_dbg(ar->ab, ATH11K_DBG_REG,
"Country Setting is not allowed\n");
return;
}
if (!ath11k_regdom_changes(ar, request->alpha2)) {
ath11k_dbg(ar->ab, ATH11K_DBG_REG, "Country is already set\n");
return;
}
/* The SET_INIT_COUNTRY command should not be sent to firmware while
* any vdev is active. It also does not make sense to send the command
* for only some of the pdevs. Hence check all pdevs for an active vdev
* before sending the command.
*/
if (!ath11k_reg_validate_pdev_state(ar))
return;
/* Set the country code to the firmware and wait for
* the WMI_REG_CHAN_LIST_CC EVENT for updating the
* reg info
*/
init_country_param.flags = ALPHA_IS_SET;
memcpy(&init_country_param.cc_info.alpha2, request->alpha2, 2);
init_country_param.cc_info.alpha2[2] = 0;
ret = ath11k_wmi_send_init_country_cmd(ar, init_country_param);
if (ret)
ath11k_warn(ar->ab,
"INIT Country code set to fw failed : %d\n", ret);
}
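/* Send the given country code to firmware on all radios */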
void ath11k_reg_update_cc(struct ath11k_base *ab, const char *country_code)
{
struct wmi_init_country_params init_country_param;
struct ath11k_pdev *pdev;
int i, ret;
init_country_param.flags = ALPHA_IS_SET;
memcpy(&init_country_param.cc_info.alpha2, country_code, 2);
init_country_param.cc_info.alpha2[2] = 0;
for (i = 0; i < ab->num_radios; i++) {
pdev = &ab->pdevs[i];
ret = ath11k_wmi_send_init_country_cmd(pdev->ar, init_country_param);
if (ret)
ath11k_warn(pdev->ar->ab,
"INIT Country code set to fw failed : %d\n", ret);
}
}
int ath11k_reg_update_chan_list(struct ath11k *ar)
{
return ath11k_wmi_update_scan_chan_list(ar, NULL);
}
static void ath11k_copy_regd(struct ieee80211_regdomain *regd_orig,
struct ieee80211_regdomain *regd_copy)
{
u8 i;
/* The caller should have checked error conditions */
memcpy(regd_copy, regd_orig, sizeof(*regd_orig));
for (i = 0; i < regd_orig->n_reg_rules; i++)
memcpy(&regd_copy->reg_rules[i], &regd_orig->reg_rules[i],
sizeof(struct ieee80211_reg_rule));
}
int ath11k_regd_update(struct ath11k *ar, bool init)
{
struct ieee80211_regdomain *regd, *regd_copy = NULL;
int ret, regd_len, pdev_id;
struct ath11k_base *ab;
ab = ar->ab;
pdev_id = ar->pdev_idx;
spin_lock_bh(&ab->base_lock);
if (init) {
/* Apply the regd received during init through
* WMI_REG_CHAN_LIST_CC event. In case of failure to
* receive the regd, initialize with a default world
* regulatory.
*/
if (ab->default_regd[pdev_id]) {
regd = ab->default_regd[pdev_id];
} else {
ath11k_warn(ab,
"failed to receive default regd during init\n");
regd = (struct ieee80211_regdomain *)&ath11k_world_regd;
}
} else {
regd = ab->new_regd[pdev_id];
}
if (!regd) {
ret = -EINVAL;
spin_unlock_bh(&ab->base_lock);
goto err;
}
regd_len = sizeof(*regd) + (regd->n_reg_rules *
sizeof(struct ieee80211_reg_rule));
regd_copy = kzalloc(regd_len, GFP_ATOMIC);
if (regd_copy)
ath11k_copy_regd(regd, regd_copy);
spin_unlock_bh(&ab->base_lock);
if (!regd_copy) {
ret = -ENOMEM;
goto err;
}
rtnl_lock();
wiphy_lock(ar->hw->wiphy);
if (ar->afc.is_6g_afc_power_event_received || ar->afc.switch_to_lpi_indication_received)
ar->hw->wiphy->regulatory_flags |= REGULATORY_SET_BY_6GHZ_AFC;
else
ar->hw->wiphy->regulatory_flags &= ~REGULATORY_SET_BY_6GHZ_AFC;
ret = regulatory_set_wiphy_regd_sync(ar->hw->wiphy, regd_copy);
wiphy_unlock(ar->hw->wiphy);
rtnl_unlock();
kfree(regd_copy);
if (ret)
goto err;
if (ar->state == ATH11K_STATE_ON) {
ret = ath11k_reg_update_chan_list(ar);
if (ret)
goto err;
}
return 0;
err:
ath11k_warn(ab, "failed to perform regd update : %d\n", ret);
return ret;
}
static enum nl80211_dfs_regions
ath11k_map_fw_dfs_region(enum ath11k_dfs_region dfs_region)
{
switch (dfs_region) {
case ATH11K_DFS_REG_FCC:
case ATH11K_DFS_REG_CN:
return NL80211_DFS_FCC;
case ATH11K_DFS_REG_ETSI:
case ATH11K_DFS_REG_KR:
return NL80211_DFS_ETSI;
case ATH11K_DFS_REG_MKK:
case ATH11K_DFS_REG_MKK_N:
return NL80211_DFS_JP;
default:
return NL80211_DFS_UNSET;
}
}
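/* Convert firmware regulatory channel flags to nl80211 reg rule flags */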
static u32 ath11k_map_fw_reg_flags(u16 reg_flags)
{
u32 flags = 0;
if (reg_flags & REGULATORY_CHAN_NO_IR)
flags = NL80211_RRF_NO_IR;
if (reg_flags & REGULATORY_CHAN_RADAR)
flags |= NL80211_RRF_DFS;
if (reg_flags & REGULATORY_CHAN_NO_OFDM)
flags |= NL80211_RRF_NO_OFDM;
if (reg_flags & REGULATORY_CHAN_INDOOR_ONLY)
flags |= NL80211_RRF_NO_OUTDOOR;
if (reg_flags & REGULATORY_CHAN_NO_HT40)
flags |= NL80211_RRF_NO_HT40;
if (reg_flags & REGULATORY_CHAN_NO_80MHZ)
flags |= NL80211_RRF_NO_80MHZ;
if (reg_flags & REGULATORY_CHAN_NO_160MHZ)
flags |= NL80211_RRF_NO_160MHZ;
return flags;
}
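/* Two reg rules can intersect only when they have the same 6G power
* mode and their frequency ranges overlap
*/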
static bool
ath11k_reg_can_intersect(struct ieee80211_reg_rule *rule1,
struct ieee80211_reg_rule *rule2)
{
u32 start_freq1, end_freq1;
u32 start_freq2, end_freq2;
u8 reg_6g_pwr_mode1, reg_6g_pwr_mode2;
start_freq1 = rule1->freq_range.start_freq_khz;
start_freq2 = rule2->freq_range.start_freq_khz;
end_freq1 = rule1->freq_range.end_freq_khz;
end_freq2 = rule2->freq_range.end_freq_khz;
reg_6g_pwr_mode1 = rule1->mode;
reg_6g_pwr_mode2 = rule2->mode;
/* 6G reg rules cannot intersect if the power modes differ.
* NOTE: For 2G/5G rules, the mode is always 0.
*/
if (reg_6g_pwr_mode1 != reg_6g_pwr_mode2)
return false;
if ((start_freq1 >= start_freq2 &&
start_freq1 < end_freq2) ||
(start_freq2 > start_freq1 &&
start_freq2 < end_freq1))
return true;
/* TODO: Should we restrict intersection feasibility
* based on min bandwidth of the intersected region also,
* say the intersected rule should have a min bandwidth
* of 20MHz?
*/
return false;
}
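/* Intersect a default SP rule (rule1) with an AFC-derived rule (rule2):
* use the overlapping frequency range, OR the flags, keep rule1's EIRP
* and take the lower PSD when both rules carry one
*/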
static void ath11k_reg_intersect_sp_rules(struct ath11k *ar,
struct ieee80211_reg_rule *rule1,
struct ieee80211_reg_rule *rule2,
struct ieee80211_reg_rule *new_rule)
{
u32 start_freq1, end_freq1;
u32 start_freq2, end_freq2;
u32 freq_diff;
start_freq1 = rule1->freq_range.start_freq_khz;
start_freq2 = rule2->freq_range.start_freq_khz;
end_freq1 = rule1->freq_range.end_freq_khz;
end_freq2 = rule2->freq_range.end_freq_khz;
new_rule->freq_range.start_freq_khz = max_t(u32, start_freq1,
start_freq2);
new_rule->freq_range.end_freq_khz = min_t(u32, end_freq1, end_freq2);
freq_diff = new_rule->freq_range.end_freq_khz -
new_rule->freq_range.start_freq_khz;
new_rule->freq_range.max_bandwidth_khz = min_t(u32, freq_diff,
rule1->freq_range.max_bandwidth_khz);
/* Assign the max EIRP of rule1, as the AFC response does not
* carry an EIRP value for a frequency range
*/
new_rule->power_rule.max_eirp = rule1->power_rule.max_eirp;
/* Use the flags of both the rules */
new_rule->flags = rule1->flags | rule2->flags;
if ((rule1->flags & NL80211_RRF_PSD) && (rule2->flags & NL80211_RRF_PSD))
new_rule->psd = min_t(s8, rule1->psd, rule2->psd);
else
new_rule->flags &= ~NL80211_RRF_PSD;
new_rule->mode = NL80211_REG_AP_SP;
/* To be safe, let's use the max CAC timeout of both rules */
new_rule->dfs_cac_ms = max_t(u32, rule1->dfs_cac_ms,
rule2->dfs_cac_ms);
ath11k_dbg(ar->ab, ATH11K_DBG_AFC,
"Adding sp rule start freq %u end freq %u max bw %u max eirp %d psd %d flags 0x%x\n",
new_rule->freq_range.start_freq_khz,
new_rule->freq_range.end_freq_khz,
new_rule->freq_range.max_bandwidth_khz,
new_rule->power_rule.max_eirp, new_rule->psd, new_rule->flags);
}
static const char *
ath11k_reg_get_regdom_str(enum nl80211_dfs_regions dfs_region)
{
switch (dfs_region) {
case NL80211_DFS_FCC:
return "FCC";
case NL80211_DFS_ETSI:
return "ETSI";
case NL80211_DFS_JP:
return "JP";
default:
return "UNSET";
}
}
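/* Limit the rule bandwidth to the frequency span and round it down to a
* standard channel width (20/40/80 MHz); 160 MHz and above is returned
* unchanged
*/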
static u16
ath11k_reg_adjust_bw(u16 start_freq, u16 end_freq, u16 max_bw)
{
u16 bw;
bw = end_freq - start_freq;
bw = min_t(u16, bw, max_bw);
if (bw >= 80 && bw < 160)
bw = 80;
else if (bw >= 40 && bw < 80)
bw = 40;
else if (bw < 40)
bw = 20;
return bw;
}
static void
ath11k_reg_update_rule(struct ieee80211_reg_rule *reg_rule, u32 start_freq,
u32 end_freq, u32 bw, u32 ant_gain, u32 reg_pwr,
s8 psd, u32 reg_flags,
enum nl80211_regulatory_power_modes pwr_mode)
{
reg_rule->freq_range.start_freq_khz = MHZ_TO_KHZ(start_freq);
reg_rule->freq_range.end_freq_khz = MHZ_TO_KHZ(end_freq);
reg_rule->freq_range.max_bandwidth_khz = MHZ_TO_KHZ(bw);
reg_rule->power_rule.max_antenna_gain = DBI_TO_MBI(ant_gain);
reg_rule->power_rule.max_eirp = DBM_TO_MBM(reg_pwr);
reg_rule->mode = pwr_mode;
reg_rule->psd = psd;
reg_rule->flags = reg_flags;
}
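/* Split a rule overlapping the ETSI weather radar band (5590 - 5650 MHz)
* into up to three rules and apply the extended 600 s CAC timeout to the
* portion inside the weather radar band
*/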
static void
ath11k_reg_update_weather_radar_band(struct ath11k_base *ab,
struct ieee80211_regdomain *regd,
struct cur_reg_rule *reg_rule,
u8 *rule_idx, u32 flags, u16 max_bw)
{
u32 end_freq;
u16 bw;
u8 i;
i = *rule_idx;
bw = ath11k_reg_adjust_bw(reg_rule->start_freq,
ETSI_WEATHER_RADAR_BAND_LOW, max_bw);
ath11k_reg_update_rule(regd->reg_rules + i, reg_rule->start_freq,
ETSI_WEATHER_RADAR_BAND_LOW, bw,
reg_rule->ant_gain, reg_rule->reg_power,
reg_rule->psd_eirp, flags, 0);
ath11k_dbg(ab, ATH11K_DBG_REG,
"\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
i + 1, reg_rule->start_freq, ETSI_WEATHER_RADAR_BAND_LOW,
bw, reg_rule->ant_gain, reg_rule->reg_power,
regd->reg_rules[i].dfs_cac_ms,
flags);
if (reg_rule->end_freq > ETSI_WEATHER_RADAR_BAND_HIGH)
end_freq = ETSI_WEATHER_RADAR_BAND_HIGH;
else
end_freq = reg_rule->end_freq;
bw = ath11k_reg_adjust_bw(ETSI_WEATHER_RADAR_BAND_LOW, end_freq,
max_bw);
i++;
ath11k_reg_update_rule(regd->reg_rules + i,
ETSI_WEATHER_RADAR_BAND_LOW, end_freq, bw,
reg_rule->ant_gain, reg_rule->reg_power,
reg_rule->psd_eirp, flags, 0);
regd->reg_rules[i].dfs_cac_ms = ETSI_WEATHER_RADAR_BAND_CAC_TIMEOUT;
ath11k_dbg(ab, ATH11K_DBG_REG,
"\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
i + 1, ETSI_WEATHER_RADAR_BAND_LOW, end_freq,
bw, reg_rule->ant_gain, reg_rule->reg_power,
regd->reg_rules[i].dfs_cac_ms,
flags);
if (end_freq == reg_rule->end_freq) {
regd->n_reg_rules--;
*rule_idx = i;
return;
}
bw = ath11k_reg_adjust_bw(ETSI_WEATHER_RADAR_BAND_HIGH,
reg_rule->end_freq, max_bw);
i++;
ath11k_reg_update_rule(regd->reg_rules + i, ETSI_WEATHER_RADAR_BAND_HIGH,
reg_rule->end_freq, bw,
reg_rule->ant_gain, reg_rule->reg_power,
reg_rule->psd_eirp, flags, 0);
ath11k_dbg(ab, ATH11K_DBG_REG,
"\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
i + 1, ETSI_WEATHER_RADAR_BAND_HIGH, reg_rule->end_freq,
bw, reg_rule->ant_gain, reg_rule->reg_power,
regd->reg_rules[i].dfs_cac_ms,
flags);
*rule_idx = i;
}
static void ath11k_copy_reg_rule(struct ath11k_reg_rule *ath11k_reg_rule,
struct cur_reg_rule *reg_rule)
{
if (!ath11k_reg_rule->start_freq)
ath11k_reg_rule->start_freq = reg_rule->start_freq;
if ((!ath11k_reg_rule->end_freq) ||
(ath11k_reg_rule->end_freq < reg_rule->end_freq))
ath11k_reg_rule->end_freq = reg_rule->end_freq;
}
enum wmi_reg_6g_ap_type
ath11k_ieee80211_ap_pwr_type_convert(enum ieee80211_ap_reg_power power_type)
{
switch (power_type) {
case IEEE80211_REG_LPI_AP:
return WMI_REG_INDOOR_AP;
case IEEE80211_REG_SP_AP:
return WMI_REG_STANDARD_POWER_AP;
case IEEE80211_REG_VLP_AP:
return WMI_REG_VERY_LOW_POWER_AP;
default:
return WMI_REG_MAX_AP_TYPE;
}
}
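/* Return the first 6G rule set that still has rules (AP power modes
* first, then client combinations), report its bandwidth limit and power
* mode, and clear its rule count so the next call moves on
*/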
static struct cur_reg_rule
*ath11k_get_active_6g_reg_rule(struct cur_regulatory_info *reg_info,
u32 *max_bw_6g, int *max_elements,
enum nl80211_regulatory_power_modes *pwr_mode)
{
struct cur_reg_rule *reg_rule = NULL;
u8 i = 0, j = 0;
for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) {
if (reg_info->num_6g_reg_rules_ap[i]) {
*max_elements = reg_info->num_6g_reg_rules_ap[i];
reg_rule = reg_info->reg_rules_6g_ap_ptr[i];
*max_bw_6g = reg_info->max_bw_6g_ap[i];
reg_info->num_6g_reg_rules_ap[i] = 0;
*pwr_mode = i;
return reg_rule;
}
}
for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
for (j = 0; j < WMI_REG_CURRENT_MAX_AP_TYPE; j++) {
if (reg_info->num_6g_reg_rules_client[j][i]) {
*max_elements = reg_info->num_6g_reg_rules_client
[j][i];
reg_rule = reg_info->reg_rules_6g_client_ptr
[j][i];
*max_bw_6g = reg_info->max_bw_6g_client[j][i];
reg_info->num_6g_reg_rules_client[j][i] = 0;
*pwr_mode = WMI_REG_CURRENT_MAX_AP_TYPE * (i + 1) + j;
return reg_rule;
}
}
}
return reg_rule;
}
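/* Build an ieee80211_regdomain from the firmware regulatory info. 2G and
* 5G rules are added directly, 6G rules per power mode, except standard
* power rules which are saved in ab->sp_rule for later intersection with
* AFC data
*/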
struct ieee80211_regdomain *
ath11k_reg_build_regd(struct ath11k_base *ab,
struct cur_regulatory_info *reg_info,
enum ieee80211_ap_reg_power power_type)
{
struct ieee80211_regdomain *new_regd = NULL;
struct cur_reg_rule *reg_rule, *reg_rule_6g;
int max_elements = 0, sp_idx = 0;
struct ath11k_6g_sp_reg_rule *sp_rule = NULL;
u8 i = 0, j = 0, k = 0, idx = 0;
u8 num_rules, num_6g_sp_rules = 0;
u16 max_bw;
u32 flags, reg_6g_number = 0, max_bw_6g = 0;
char alpha2[3];
bool reg_6g_itr_set = false;
enum nl80211_regulatory_power_modes pwr_mode;
num_rules = reg_info->num_5g_reg_rules + reg_info->num_2g_reg_rules;
/* FIXME: Currently updating all 9 possible regulatory rules for 6G.
* For space optimization, logic can be enhanced to store reg rules
* dynamically from power, AP and STA mode combination.
*/
if (reg_info->is_ext_reg_event) {
/* All 6G APs - (LP, SP, VLP) */
for (i = 0; i < WMI_REG_CURRENT_MAX_AP_TYPE; i++) {
if (i == WMI_REG_STANDARD_POWER_AP)
num_6g_sp_rules = reg_info->num_6g_reg_rules_ap[i];
reg_6g_number += reg_info->num_6g_reg_rules_ap[i];
}
/* All 6G STAs - (LP_DEF, LP_SUB, SP_DEF, SP_SUB, VLP_DEF, VLP_SUB) */
for (i = 0; i < WMI_REG_MAX_CLIENT_TYPE; i++) {
for (j = 0; j < WMI_REG_CURRENT_MAX_AP_TYPE; j++)
reg_6g_number += reg_info->num_6g_reg_rules_client
[j][i];
}
}
ath11k_dbg(ab, ATH11K_DBG_AFC, "Number of sp rules from target %d\n",
num_6g_sp_rules);
num_rules += (reg_6g_number - num_6g_sp_rules);
if (!num_rules)
return new_regd;
/* Add max additional rules to accommodate weather radar band */
if (reg_info->dfs_region == ATH11K_DFS_REG_ETSI)
num_rules += 2;
/* 6G standard power rules should not be passed to cfg80211
* until we get AFC data with valid rules. Hence save the SP
* rules in ab and use them to intersect with the AFC rules when
* the AFC power update arrives
*/
sp_rule = kzalloc(sizeof(*sp_rule) +
(num_6g_sp_rules * sizeof(struct ieee80211_reg_rule)),
GFP_ATOMIC);
if (!sp_rule)
return new_regd;
sp_rule->num_6g_sp_rule = num_6g_sp_rules;
new_regd = kzalloc(sizeof(*new_regd) +
(num_rules * sizeof(struct ieee80211_reg_rule)),
GFP_ATOMIC);
if (!new_regd) {
kfree(sp_rule);
return new_regd;
}
memcpy(new_regd->alpha2, reg_info->alpha2, REG_ALPHA2_LEN + 1);
memcpy(alpha2, reg_info->alpha2, REG_ALPHA2_LEN + 1);
alpha2[2] = '\0';
new_regd->dfs_region = ath11k_map_fw_dfs_region(reg_info->dfs_region);
ath11k_dbg(ab, ATH11K_DBG_REG,
"\r\nCountry %s, CFG Regdomain %s FW Regdomain %d, num_reg_rules %d\n",
alpha2, ath11k_reg_get_regdom_str(new_regd->dfs_region),
reg_info->dfs_region, num_rules);
/* Update reg_rules[] below. Firmware is expected to
* send these rules in order (2G rules first and then 5G)
*/
for (i = 0, j = 0, idx = 0; i < num_rules + num_6g_sp_rules; i++) {
if (reg_info->num_2g_reg_rules &&
(i < reg_info->num_2g_reg_rules)) {
reg_rule = reg_info->reg_rules_2g_ptr + i;
max_bw = min_t(u16, reg_rule->max_bw,
reg_info->max_bw_2g);
flags = 0;
pwr_mode = 0;
ath11k_copy_reg_rule(&ab->reg_rule_2g, reg_rule);
} else if (reg_info->num_5g_reg_rules &&
(j < reg_info->num_5g_reg_rules)) {
reg_rule = reg_info->reg_rules_5g_ptr + j++;
max_bw = min_t(u16, reg_rule->max_bw,
reg_info->max_bw_5g);
/* FW doesn't pass NL80211_RRF_AUTO_BW flag for
* BW Auto correction, we can enable this by default
* for all 5G rules here. The regulatory core performs
* BW correction if required and applies flags as
* per other BW rule flags we pass from here
*/
flags = NL80211_RRF_AUTO_BW;
pwr_mode = 0;
if (reg_rule->end_freq <= ATH11K_MAX_5G_FREQ)
ath11k_copy_reg_rule(&ab->reg_rule_5g, reg_rule);
else if (reg_rule->start_freq >= ATH11K_MIN_6G_FREQ)
ath11k_copy_reg_rule(&ab->reg_rule_6g, reg_rule);
} else if (reg_info->is_ext_reg_event && reg_6g_number) {
if (!reg_6g_itr_set) {
reg_rule_6g = ath11k_get_active_6g_reg_rule(reg_info, &max_bw_6g,
&max_elements, &pwr_mode);
if (!reg_rule_6g) {
ath11k_warn(ab,
"\nFetching a valid reg_rule_6g_ptr failed. "
"This shouldn't happen normally. Be careful with "
"the regulatory domain settings\n");
break;
}
reg_6g_itr_set = true;
}
if (reg_6g_itr_set && k < max_elements) {
reg_rule = reg_rule_6g + k++;
max_bw = min_t(u16, reg_rule->max_bw, max_bw_6g);
flags = NL80211_RRF_AUTO_BW;
if (reg_rule->psd_flag)
flags |= NL80211_RRF_PSD;
if (reg_rule->end_freq <= ATH11K_MAX_6G_FREQ)
ath11k_copy_reg_rule(&ab->reg_rule_6g, reg_rule);
else if (reg_rule->start_freq >= ATH11K_MIN_6G_FREQ)
ath11k_copy_reg_rule(&ab->reg_rule_6g, reg_rule);
}
if (reg_6g_itr_set && k >= max_elements) {
reg_6g_itr_set = false;
reg_rule_6g = NULL;
max_bw_6g = 0;
max_elements = 0;
k = 0;
}
reg_6g_number--;
} else {
break;
}
flags |= ath11k_map_fw_reg_flags(reg_rule->flags);
if (pwr_mode == NL80211_REG_AP_SP) {
ath11k_reg_update_rule(sp_rule->sp_reg_rule + sp_idx,
reg_rule->start_freq,
reg_rule->end_freq, max_bw,
reg_rule->ant_gain, reg_rule->reg_power,
reg_rule->psd_eirp, flags, pwr_mode);
ath11k_dbg(ab, ATH11K_DBG_AFC,
"Target sp rule freq low: %d high: %d bw: %d psd: %d flag: 0x%x\n",
reg_rule->start_freq, reg_rule->end_freq,
max_bw, reg_rule->psd_eirp, flags);
} else {
ath11k_reg_update_rule(new_regd->reg_rules + idx,
reg_rule->start_freq,
reg_rule->end_freq, max_bw,
reg_rule->ant_gain, reg_rule->reg_power,
reg_rule->psd_eirp, flags, pwr_mode);
}
/* Update dfs cac timeout if the dfs domain is ETSI and the
* new rule covers weather radar band.
* Default value of '0' corresponds to 60s timeout, so no
* need to update that for other rules.
*/
if (flags & NL80211_RRF_DFS &&
reg_info->dfs_region == ATH11K_DFS_REG_ETSI &&
(reg_rule->end_freq > ETSI_WEATHER_RADAR_BAND_LOW &&
(reg_rule->start_freq < ETSI_WEATHER_RADAR_BAND_HIGH)) {
ath11k_reg_update_weather_radar_band(ab, new_regd,
reg_rule, &idx,
flags, max_bw);
idx++;
continue;
}
if (reg_info->is_ext_reg_event) {
ath11k_dbg(ab, ATH11K_DBG_REG,
"\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d) (%d, %d) (6G_POWER_MODE: %d)\n",
idx + 1, reg_rule->start_freq, reg_rule->end_freq, max_bw,
reg_rule->ant_gain, reg_rule->reg_power,
new_regd->reg_rules[idx].dfs_cac_ms, flags, reg_rule->psd_flag,
reg_rule->psd_eirp, new_regd->reg_rules[idx].mode);
} else {
ath11k_dbg(ab, ATH11K_DBG_REG,
"\t%d. (%d - %d @ %d) (%d, %d) (%d ms) (FLAGS %d)\n",
idx + 1, reg_rule->start_freq, reg_rule->end_freq,
max_bw, reg_rule->ant_gain, reg_rule->reg_power,
new_regd->reg_rules[idx].dfs_cac_ms,
flags);
}
if (pwr_mode == NL80211_REG_AP_SP)
sp_idx++;
else
idx++;
}
kfree(ab->sp_rule);
ab->sp_rule = sp_rule;
new_regd->n_reg_rules = idx;
return new_regd;
}
int ath11k_reg_get_6g_opclass_from_bw(int bw, int cfi)
{
int opclass;
switch (bw) {
case NL80211_CHAN_WIDTH_20:
opclass = 131;
/* According to IEEE Std 802.11 Table E-4 (Global operating classes) */
if (cfi == 2)
opclass = 136;
break;
case NL80211_CHAN_WIDTH_40:
opclass = 132;
break;
case NL80211_CHAN_WIDTH_80:
opclass = 133;
break;
case NL80211_CHAN_WIDTH_160:
opclass = 134;
break;
case NL80211_CHAN_WIDTH_80P80:
opclass = 135;
break;
default:
opclass = 0;
}
return opclass;
}
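/* Look up the AFC EIRP limit for the given bandwidth and channel center
* frequency index (cfi). Returns the stored EIRP scaled down by 100, or
* 0 if no matching entry is found
*/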
s8 ath11k_reg_get_afc_eirp_power(struct ath11k *ar, enum nl80211_chan_width bw, int cfi)
{
u16 eirp_pwr = 0;
struct ath11k_afc_sp_reg_info *reg_info = ar->afc.afc_reg_info;
struct ath11k_afc_chan_obj *afc_chan;
struct ath11k_chan_eirp_obj *chan_eirp;
int op_class, i, j;
spin_lock_bh(&ar->data_lock);
if (!ar->afc.afc_reg_info) {
ath11k_warn(ar->ab, "AFC power info not found\n");
goto ret;
}
op_class = ath11k_reg_get_6g_opclass_from_bw(bw, cfi);
if (!op_class) {
ath11k_warn(ar->ab, "Invalid opclass for 6g bw\n");
goto ret;
}
ath11k_dbg(ar->ab, ATH11K_DBG_AFC, "Configured BW belong to op_class %d cfi %d\n",
op_class, cfi);
for (i = 0; i < reg_info->num_chan_objs; i++) {
afc_chan = reg_info->afc_chan_info + i;
if (afc_chan->global_opclass != op_class)
continue;
for (j = 0; j < afc_chan->num_chans; j++) {
chan_eirp = afc_chan->chan_eirp_info + j;
if (chan_eirp->cfi == cfi) {
eirp_pwr = chan_eirp->eirp_power;
break;
}
}
if (eirp_pwr)
break;
}
eirp_pwr = eirp_pwr / 100;
ret:
spin_unlock_bh(&ar->data_lock);
return eirp_pwr;
}
void ath11k_reg_get_afc_eirp_power_for_bw(struct ath11k *ar, u16 *start_freq,
u16 *center_freq, int pwr_level,
struct cfg80211_chan_def *chan_def,
s8 *tx_power)
{
int bw, cfi;
if (chan_def->width == NL80211_CHAN_WIDTH_80P80 && pwr_level == 3)
*center_freq = (u16)chan_def->center_freq2;
else
*center_freq = *start_freq + (10 * (BIT(pwr_level) - 1));
/* For 20 MHz, no +10 offset is required */
if (pwr_level != 0)
*center_freq += 10;
/* The power level maps directly to enum nl80211_chan_width
* plus one, since the power level starts from 0
*/
if (pwr_level < 3)
bw = pwr_level + 1;
else if (pwr_level == 3)
bw = chan_def->width;
cfi = ieee80211_frequency_to_channel(*center_freq);
*tx_power = ath11k_reg_get_afc_eirp_power(ar, bw, cfi);
}
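/* Rebuild the current regdomain without any SP rules and schedule a regd
* update; used when firmware indicates a switch to LPI operation
*/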
int ath11k_reg_switch_to_lpi(struct ath11k_base *ab, struct ath11k_afc_info *afc)
{
struct ieee80211_regdomain *regd = NULL;
struct ieee80211_regdomain *new_regd = NULL;
struct ieee80211_reg_rule *old_rule, *new_regd_rules;
struct ath11k *ar = container_of(afc, struct ath11k, afc);
int new_reg_rule_cnt = 0, num_regd_rules = 0, num_sp_rules = 0;
int i, k, pdev_idx, ret = 0;
pdev_idx = ar->pdev_idx;
if (ab->new_regd[pdev_idx]) {
regd = ab->new_regd[pdev_idx];
} else {
regd = ab->default_regd[pdev_idx];
}
if (!regd) {
ath11k_warn(ab, "Regulatory domain data not present\n");
return -EINVAL;
}
num_regd_rules = regd->n_reg_rules;
for (i = 0; i < num_regd_rules; i++) {
old_rule = regd->reg_rules + i;
if (old_rule->mode == NL80211_REG_AP_SP)
num_sp_rules++;
}
new_reg_rule_cnt = num_regd_rules - num_sp_rules;
new_regd = kzalloc(sizeof(*new_regd) +
(sizeof(*new_regd_rules) * new_reg_rule_cnt),
GFP_KERNEL);
if (!new_regd)
return -ENOMEM;
new_regd->n_reg_rules = new_reg_rule_cnt;
memcpy(new_regd->alpha2, regd->alpha2, REG_ALPHA2_LEN + 1);
new_regd->dfs_region = ath11k_map_fw_dfs_region(regd->dfs_region);
k = 0;
for (i = 0; i < num_regd_rules; i++) {
old_rule = regd->reg_rules + i;
if (old_rule->mode == NL80211_REG_AP_SP) {
continue;
} else {
memcpy((new_regd->reg_rules + k), old_rule, sizeof(*new_regd_rules));
k++;
}
}
ar->afc.switch_to_lpi_indication_received = true;
spin_lock_bh(&ab->base_lock);
kfree(ab->new_regd[pdev_idx]);
ab->new_regd[pdev_idx] = new_regd;
spin_unlock_bh(&ab->base_lock);
ieee80211_queue_work(ar->hw, &ar->regd_update_work);
return ret;
}
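/* Intersect the SP rules saved in ab->sp_rule with the frequency objects
* from the AFC power event and install a new regdomain containing the
* non-SP rules plus the intersected SP rules
*/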
int ath11k_reg_process_afc_power_event(struct ath11k *ar)
{
struct ath11k_base *ab = ar->ab;
struct ath11k_afc_sp_reg_info *afc_reg_info = NULL;
struct ieee80211_regdomain *regd = NULL;
struct ath11k_6g_sp_reg_rule *sp_rule = NULL;
struct ath11k_afc_freq_obj *afc_freq_info;
struct ath11k_afc_freq_obj *afc_freq_obj;
struct ieee80211_reg_rule *old_rule, *new_regd_rules;
struct ieee80211_regdomain *new_regd = NULL;
struct ieee80211_reg_rule new_rule = {0};
int new_reg_rule_cnt, num_regd_rules, num_afc_rules, num_sp_rules;
int i, j, k, pdev_idx;
int ret = 0, num_old_sp_rules = 0, num_new_sp_rules = 0;
char alpha2[3] = {0};
pdev_idx = ar->pdev_idx;
if (!ab->sp_rule || !ar->afc.afc_reg_info)
return -EINVAL;
spin_lock_bh(&ar->data_lock);
sp_rule = ab->sp_rule;
afc_reg_info = ar->afc.afc_reg_info;
afc_freq_info = afc_reg_info->afc_freq_info;
if (afc_reg_info->fw_status_code != REG_FW_AFC_POWER_EVENT_SUCCESS) {
ath11k_warn(ab, "AFC power event failure status code %d\n",
afc_reg_info->fw_status_code);
ret = -EINVAL;
goto end;
}
if (!sp_rule->num_6g_sp_rule) {
ath11k_warn(ab, "No default 6g sp rules present\n");
ret = -EINVAL;
goto end;
}
if (ar->afc.is_6g_afc_power_event_received) {
ath11k_warn(ab, "Invalid power event without expiry event\n");
ret = -EINVAL;
goto end;
}
ar->afc.is_6g_afc_power_event_received = true;
regd = ab->default_regd[pdev_idx];
if (!regd) {
ath11k_warn(ab, "Regulatory domain data not present\n");
ret = -EINVAL;
goto end;
}
num_sp_rules = ab->sp_rule->num_6g_sp_rule;
num_regd_rules = regd->n_reg_rules;
num_afc_rules = afc_reg_info->num_freq_objs;
for (i = 0; i < num_regd_rules; i++) {
old_rule = regd->reg_rules + i;
if (old_rule->mode == NL80211_REG_AP_SP)
num_old_sp_rules++;
}
for (i = 0; i < num_sp_rules; i++) {
old_rule = sp_rule->sp_reg_rule + i;
for (j = 0; j < num_afc_rules; j++) {
afc_freq_obj = afc_freq_info + j;
new_rule.freq_range.start_freq_khz =
MHZ_TO_KHZ(afc_freq_obj->low_freq);
new_rule.freq_range.end_freq_khz =
MHZ_TO_KHZ(afc_freq_obj->high_freq);
new_rule.mode = NL80211_REG_AP_SP;
if (ath11k_reg_can_intersect(old_rule, &new_rule))
num_new_sp_rules++;
}
}
if (num_new_sp_rules)
ar->afc.switch_to_lpi_indication_received = false;
/* Remove the old sp rule from regd and add the new intersected sp rules */
new_reg_rule_cnt = num_regd_rules - num_old_sp_rules + num_new_sp_rules;
ath11k_dbg(ab, ATH11K_DBG_AFC,
"Tot reg rules %d old sp rules %d new sp rules after intersection %d\n",
num_regd_rules, num_old_sp_rules, num_new_sp_rules);
new_regd = kzalloc(sizeof(*new_regd) +
(sizeof(*new_regd_rules) * new_reg_rule_cnt),
GFP_ATOMIC);
if (!new_regd) {
ret = -ENOMEM;
goto end;
}
new_regd->n_reg_rules = new_reg_rule_cnt;
memcpy(new_regd->alpha2, regd->alpha2, REG_ALPHA2_LEN + 1);
memcpy(alpha2, regd->alpha2, REG_ALPHA2_LEN + 1);
alpha2[2] = '\0';
new_regd->dfs_region = ath11k_map_fw_dfs_region(regd->dfs_region);
ath11k_dbg(ab, ATH11K_DBG_AFC,
"\nAFC: Country %s, CFG Regdomain %s, num_reg_rules %d\n",
alpha2, ath11k_reg_get_regdom_str(new_regd->dfs_region),
new_reg_rule_cnt);
k = 0;
for (i = 0; i < num_regd_rules; i++) {
old_rule = regd->reg_rules + i;
if (old_rule->mode == NL80211_REG_AP_SP) {
continue;
} else {
memcpy((new_regd->reg_rules + k), old_rule, sizeof(*new_regd_rules));
k++;
}
}
for (i = 0; i < num_sp_rules; i++) {
old_rule = sp_rule->sp_reg_rule + i;
for (j = 0; j < num_afc_rules; j++) {
afc_freq_obj = afc_freq_info + j;
new_rule.freq_range.start_freq_khz =
MHZ_TO_KHZ(afc_freq_obj->low_freq);
new_rule.freq_range.end_freq_khz =
MHZ_TO_KHZ(afc_freq_obj->high_freq);
new_rule.mode = NL80211_REG_AP_SP;
if (ath11k_reg_can_intersect(old_rule, &new_rule)) {
new_rule.psd = (s8)(afc_freq_obj->max_psd / 100);
new_rule.flags |= NL80211_RRF_PSD;
ath11k_reg_intersect_sp_rules(ar, old_rule, &new_rule,
new_regd->reg_rules + k);
k++;
}
}
}
spin_unlock_bh(&ar->data_lock);
spin_lock_bh(&ab->base_lock);
kfree(ab->new_regd[pdev_idx]);
ab->new_regd[pdev_idx] = new_regd;
spin_unlock_bh(&ab->base_lock);
ieee80211_queue_work(ar->hw, &ar->regd_update_work);
return ret;
end:
spin_unlock_bh(&ar->data_lock);
return ret;
}
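/* Copy an AFC response into this pdev's slot of the AFC target memory
* region. The auth status word is set to error before the slot is
* rewritten and to success once the copy completes
*/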
int ath11k_copy_afc_response(struct ath11k *ar, char *afc_resp, u32 len)
{
struct ath11k_base *ab = ar->ab;
struct target_mem_chunk *target_mem = ab->qmi.target_mem;
void *mem = NULL;
int i;
int slotid = ar->pdev_idx;
u32 *status;
if (len > AFC_SLOT_SIZE) {
ath11k_warn(ab, "len %d greater than slot size\n", len);
return -EINVAL;
}
for (i = 0; i < ab->qmi.mem_seg_count; i++) {
if (target_mem[i].type == AFC_REGION_TYPE) {
mem = target_mem[i].vaddr;
status = mem + (slotid * AFC_SLOT_SIZE);
break;
}
}
if (!mem) {
ath11k_warn(ab, "AFC mem is not available\n");
return -ENOMEM;
}
status[AFC_AUTH_STATUS_OFFSET] = cpu_to_le32(AFC_AUTH_ERROR);
if (ab->userpd_id) {
memset_io(mem + (slotid * AFC_SLOT_SIZE), 0, AFC_SLOT_SIZE);
memcpy_toio(mem + (slotid * AFC_SLOT_SIZE), afc_resp, len);
} else {
memset(mem + (slotid * AFC_SLOT_SIZE), 0, AFC_SLOT_SIZE);
memcpy(mem + (slotid * AFC_SLOT_SIZE), afc_resp, len);
}
status[AFC_AUTH_STATUS_OFFSET] = cpu_to_le32(AFC_AUTH_SUCCESS);
return 0;
}
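/* Send the AFC request start command to firmware for this radio */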
int ath11k_reg_afc_start(struct ath11k_base *ab, struct ath11k_afc_info *afc)
{
struct ath11k *ar = container_of(afc, struct ath11k, afc);
struct ath11k_afc_req_fixed_params *fixed_param = NULL;
int ret = 0;
fixed_param = kzalloc(sizeof(*fixed_param), GFP_ATOMIC);
if (!fixed_param)
return -ENOMEM;
fixed_param->req_id = afc->request_id;
fixed_param->min_des_power = DEFAULT_MIN_POWER;
fixed_param->req_length = sizeof(*fixed_param);
fixed_param->status_code = ATH11K_AFC_POWER_UPDATE_IGNORE;
ret = ath11k_send_afc_start(ar, fixed_param);
kfree(fixed_param);
return ret;
}
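/* Handle an AFC expiry event: start/renew subtypes trigger a new AFC
* request, the switch-to-LPI subtype drops SP rules and falls back to
* LPI operation
*/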
int ath11k_process_expiry_event(struct ath11k_base *ab, struct ath11k_afc_info *afc)
{
int ret;
ath11k_dbg(ab, ATH11K_DBG_AFC, "AFC expiry event subtype %d\n",
afc->event_subtype);
afc->is_6g_afc_power_event_received = false;
switch (afc->event_subtype) {
case REG_AFC_EXPIRY_EVENT_START:
case REG_AFC_EXPIRY_EVENT_RENEW:
afc->is_6g_afc_expiry_event_received = true;
ret = ath11k_reg_afc_start(ab, afc);
if (ret) {
ath11k_warn(ab, "Failed to notify expiry event\n");
return ret;
}
break;
case REG_AFC_EXPIRY_EVENT_SWITCH_TO_LPI:
ath11k_dbg(ab, ATH11K_DBG_AFC, "AFC switch to LPI indication received\n");
ret = ath11k_reg_switch_to_lpi(ab, afc);
return ret;
default:
ath11k_dbg(ab, ATH11K_DBG_AFC, "Invalid AFC expiry event subtype %d\n",
afc->event_subtype);
}
return 0;
}
void ath11k_regd_update_work(struct work_struct *work)
{
struct ath11k *ar = container_of(work, struct ath11k,
regd_update_work);
int ret;
ret = ath11k_regd_update(ar, false);
if (ret) {
/* Firmware has already moved to the new regd. We need
* to maintain channel consistency across FW, Host driver
* and userspace. Hence as a fallback mechanism we can set
* the prev or default country code to the firmware.
*/
/* TODO: Implement Fallback Mechanism */
}
}
void ath11k_reg_init(struct ath11k *ar)
{
ar->hw->wiphy->regulatory_flags = REGULATORY_WIPHY_SELF_MANAGED;
ar->hw->wiphy->reg_notifier = ath11k_reg_notifier;
}
void ath11k_reg_free(struct ath11k_base *ab)
{
int i;
for (i = 0; i < ab->hw_params.max_radios; i++) {
kfree(ab->default_regd[i]);
kfree(ab->new_regd[i]);
}
}