| /* |
| * This file is part of wl1271 |
| * |
| * Copyright (C) 2008-2010 Nokia Corporation |
| * |
| * Contact: Luciano Coelho <luciano.coelho@nokia.com> |
| * |
| * This program is free software; you can redistribute it and/or |
| * modify it under the terms of the GNU General Public License |
| * version 2 as published by the Free Software Foundation. |
| * |
| * This program is distributed in the hope that it will be useful, but |
| * WITHOUT ANY WARRANTY; without even the implied warranty of |
| * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
| * General Public License for more details. |
| * |
| * You should have received a copy of the GNU General Public License |
| * along with this program; if not, write to the Free Software |
| * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA |
| * 02110-1301 USA |
| * |
| */ |
| |
| #include <linux/module.h> |
| #include <linux/firmware.h> |
| #include <linux/delay.h> |
| #include <linux/spi/spi.h> |
| #include <linux/crc32.h> |
| #include <linux/etherdevice.h> |
| #include <linux/vmalloc.h> |
| #include <linux/platform_device.h> |
| #include <linux/slab.h> |
| #include <linux/wl12xx.h> |
| #include <linux/sched.h> |
| |
| #include "wl12xx.h" |
| #include "wl12xx_80211.h" |
| #include "reg.h" |
| #include "io.h" |
| #include "event.h" |
| #include "tx.h" |
| #include "rx.h" |
| #include "ps.h" |
| #include "init.h" |
| #include "debugfs.h" |
| #include "cmd.h" |
| #include "boot.h" |
| #include "testmode.h" |
| #include "scan.h" |
| |
| #define WL1271_BOOT_RETRIES 3 |
| |
/*
 * Default driver configuration.  Copied into wl->conf by wl1271_conf_init()
 * at probe time; individual fields may be tuned at run time and take effect
 * on the next interface up.
 */
static struct conf_drv_settings default_conf = {
	/* BT/WLAN coexistence (soft-gemini) parameters */
	.sg = {
		.params = {
			[CONF_SG_ACL_BT_MASTER_MIN_BR] = 10,
			[CONF_SG_ACL_BT_MASTER_MAX_BR] = 180,
			[CONF_SG_ACL_BT_SLAVE_MIN_BR] = 10,
			[CONF_SG_ACL_BT_SLAVE_MAX_BR] = 180,
			[CONF_SG_ACL_BT_MASTER_MIN_EDR] = 10,
			[CONF_SG_ACL_BT_MASTER_MAX_EDR] = 80,
			[CONF_SG_ACL_BT_SLAVE_MIN_EDR] = 10,
			[CONF_SG_ACL_BT_SLAVE_MAX_EDR] = 80,
			[CONF_SG_ACL_WLAN_PS_MASTER_BR] = 8,
			[CONF_SG_ACL_WLAN_PS_SLAVE_BR] = 8,
			[CONF_SG_ACL_WLAN_PS_MASTER_EDR] = 20,
			[CONF_SG_ACL_WLAN_PS_SLAVE_EDR] = 20,
			[CONF_SG_ACL_WLAN_ACTIVE_MASTER_MIN_BR] = 20,
			[CONF_SG_ACL_WLAN_ACTIVE_MASTER_MAX_BR] = 35,
			[CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MIN_BR] = 16,
			[CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MAX_BR] = 35,
			[CONF_SG_ACL_WLAN_ACTIVE_MASTER_MIN_EDR] = 32,
			[CONF_SG_ACL_WLAN_ACTIVE_MASTER_MAX_EDR] = 50,
			[CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MIN_EDR] = 28,
			[CONF_SG_ACL_WLAN_ACTIVE_SLAVE_MAX_EDR] = 50,
			[CONF_SG_ACL_ACTIVE_SCAN_WLAN_BR] = 10,
			[CONF_SG_ACL_ACTIVE_SCAN_WLAN_EDR] = 20,
			[CONF_SG_ACL_PASSIVE_SCAN_BT_BR] = 75,
			[CONF_SG_ACL_PASSIVE_SCAN_WLAN_BR] = 15,
			[CONF_SG_ACL_PASSIVE_SCAN_BT_EDR] = 27,
			[CONF_SG_ACL_PASSIVE_SCAN_WLAN_EDR] = 17,
			/* active scan params */
			[CONF_SG_AUTO_SCAN_PROBE_REQ] = 170,
			[CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_HV3] = 50,
			[CONF_SG_ACTIVE_SCAN_DURATION_FACTOR_A2DP] = 100,
			/* passive scan params */
			[CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP_BR] = 800,
			[CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_A2DP_EDR] = 200,
			[CONF_SG_PASSIVE_SCAN_DURATION_FACTOR_HV3] = 200,
			/* passive scan in dual antenna params */
			[CONF_SG_CONSECUTIVE_HV3_IN_PASSIVE_SCAN] = 0,
			[CONF_SG_BCN_HV3_COLLISION_THRESH_IN_PASSIVE_SCAN] = 0,
			[CONF_SG_TX_RX_PROTECTION_BWIDTH_IN_PASSIVE_SCAN] = 0,
			/* general params */
			[CONF_SG_STA_FORCE_PS_IN_BT_SCO] = 1,
			[CONF_SG_ANTENNA_CONFIGURATION] = 0,
			[CONF_SG_BEACON_MISS_PERCENT] = 60,
			[CONF_SG_DHCP_TIME] = 5000,
			[CONF_SG_RXT] = 1200,
			[CONF_SG_TXT] = 1000,
			[CONF_SG_ADAPTIVE_RXT_TXT] = 1,
			[CONF_SG_GENERAL_USAGE_BIT_MAP] = 3,
			[CONF_SG_HV3_MAX_SERVED] = 6,
			[CONF_SG_PS_POLL_TIMEOUT] = 10,
			[CONF_SG_UPSD_TIMEOUT] = 10,
			[CONF_SG_CONSECUTIVE_CTS_THRESHOLD] = 2,
			[CONF_SG_STA_RX_WINDOW_AFTER_DTIM] = 5,
			[CONF_SG_STA_CONNECTION_PROTECTION_TIME] = 30,
			/* AP params */
			[CONF_AP_BEACON_MISS_TX] = 3,
			[CONF_AP_RX_WINDOW_AFTER_BEACON] = 10,
			[CONF_AP_BEACON_WINDOW_INTERVAL] = 2,
			[CONF_AP_CONNECTION_PROTECTION_TIME] = 0,
			[CONF_AP_BT_ACL_VAL_BT_SERVE_TIME] = 25,
			[CONF_AP_BT_ACL_VAL_WL_SERVE_TIME] = 25,
		},
		.state = CONF_SG_PROTECTIVE,
	},
	/* RX path parameters */
	.rx = {
		.rx_msdu_life_time           = 512000,
		.packet_detection_threshold  = 0,
		.ps_poll_timeout             = 15,
		.upsd_timeout                = 15,
		.rts_threshold               = IEEE80211_MAX_RTS_THRESHOLD,
		.rx_cca_threshold            = 0,
		.irq_blk_threshold           = 0xFFFF,
		.irq_pkt_threshold           = 0,
		.irq_timeout                 = 600,
		.queue_type                  = CONF_RX_QUEUE_TYPE_LOW_PRIORITY,
	},
	/* TX path parameters, per-AC and per-TID configuration */
	.tx = {
		.tx_energy_detection         = 0,
		.sta_rc_conf                 = {
			.enabled_rates       = 0,
			.short_retry_limit   = 10,
			.long_retry_limit    = 10,
			.aflags              = 0,
		},
		.ac_conf_count               = 4,
		.ac_conf                     = {
			[CONF_TX_AC_BE] = {
				.ac          = CONF_TX_AC_BE,
				.cw_min      = 15,
				.cw_max      = 63,
				.aifsn       = 3,
				.tx_op_limit = 0,
			},
			[CONF_TX_AC_BK] = {
				.ac          = CONF_TX_AC_BK,
				.cw_min      = 15,
				.cw_max      = 63,
				.aifsn       = 7,
				.tx_op_limit = 0,
			},
			[CONF_TX_AC_VI] = {
				.ac          = CONF_TX_AC_VI,
				.cw_min      = 15,
				.cw_max      = 63,
				.aifsn       = CONF_TX_AIFS_PIFS,
				.tx_op_limit = 3008,
			},
			[CONF_TX_AC_VO] = {
				.ac          = CONF_TX_AC_VO,
				.cw_min      = 15,
				.cw_max      = 63,
				.aifsn       = CONF_TX_AIFS_PIFS,
				.tx_op_limit = 1504,
			},
		},
		.max_tx_retries = 100,
		.ap_aging_period = 300,
		.tid_conf_count = 4,
		.tid_conf = {
			[CONF_TX_AC_BE] = {
				.queue_id    = CONF_TX_AC_BE,
				.channel_type = CONF_CHANNEL_TYPE_EDCF,
				.tsid        = CONF_TX_AC_BE,
				.ps_scheme   = CONF_PS_SCHEME_LEGACY,
				.ack_policy  = CONF_ACK_POLICY_LEGACY,
				.apsd_conf   = {0, 0},
			},
			[CONF_TX_AC_BK] = {
				.queue_id    = CONF_TX_AC_BK,
				.channel_type = CONF_CHANNEL_TYPE_EDCF,
				.tsid        = CONF_TX_AC_BK,
				.ps_scheme   = CONF_PS_SCHEME_LEGACY,
				.ack_policy  = CONF_ACK_POLICY_LEGACY,
				.apsd_conf   = {0, 0},
			},
			[CONF_TX_AC_VI] = {
				.queue_id    = CONF_TX_AC_VI,
				.channel_type = CONF_CHANNEL_TYPE_EDCF,
				.tsid        = CONF_TX_AC_VI,
				.ps_scheme   = CONF_PS_SCHEME_LEGACY,
				.ack_policy  = CONF_ACK_POLICY_LEGACY,
				.apsd_conf   = {0, 0},
			},
			[CONF_TX_AC_VO] = {
				.queue_id    = CONF_TX_AC_VO,
				.channel_type = CONF_CHANNEL_TYPE_EDCF,
				.tsid        = CONF_TX_AC_VO,
				.ps_scheme   = CONF_PS_SCHEME_LEGACY,
				.ack_policy  = CONF_ACK_POLICY_LEGACY,
				.apsd_conf   = {0, 0},
			},
		},
		.frag_threshold              = IEEE80211_MAX_FRAG_THRESHOLD,
		.tx_compl_timeout            = 700,
		.tx_compl_threshold          = 4,
		.basic_rate                  = CONF_HW_BIT_RATE_1MBPS,
		.basic_rate_5                = CONF_HW_BIT_RATE_6MBPS,
		.tmpl_short_retry_limit      = 10,
		.tmpl_long_retry_limit       = 10,
	},
	/* Connection management: beacon filtering, PSM, keep-alive */
	.conn = {
		.wake_up_event               = CONF_WAKE_UP_EVENT_DTIM,
		.listen_interval             = 1,
		.bcn_filt_mode               = CONF_BCN_FILT_MODE_ENABLED,
		.bcn_filt_ie_count           = 2,
		.bcn_filt_ie = {
			[0] = {
				.ie          = WLAN_EID_CHANNEL_SWITCH,
				.rule        = CONF_BCN_RULE_PASS_ON_APPEARANCE,
			},
			[1] = {
				.ie          = WLAN_EID_HT_INFORMATION,
				.rule        = CONF_BCN_RULE_PASS_ON_CHANGE,
			},
		},
		.synch_fail_thold            = 10,
		.bss_lose_timeout            = 100,
		.beacon_rx_timeout           = 10000,
		.broadcast_timeout           = 20000,
		.rx_broadcast_in_ps          = 1,
		.ps_poll_threshold           = 10,
		.ps_poll_recovery_period     = 700,
		.bet_enable                  = CONF_BET_MODE_ENABLE,
		.bet_max_consecutive         = 50,
		.psm_entry_retries           = 8,
		.psm_exit_retries            = 16,
		.psm_entry_nullfunc_retries  = 3,
		.keep_alive_interval         = 55000,
		.max_listen_interval         = 20,
	},
	/* DCO current trimming */
	.itrim = {
		.enable = false,
		.timeout = 50000,
	},
	/* Power-management host clock settings */
	.pm_config = {
		.host_clk_settling_time = 5000,
		.host_fast_wakeup_support = false
	},
	/* Roaming trigger averaging weights */
	.roam_trigger = {
		.trigger_pacing               = 1,
		.avg_weight_rssi_beacon       = 20,
		.avg_weight_rssi_data         = 10,
		.avg_weight_snr_beacon        = 20,
		.avg_weight_snr_data          = 10,
	},
	/* One-shot scan dwell times, in TU/1000 */
	.scan = {
		.min_dwell_time_active        = 7500,
		.max_dwell_time_active        = 30000,
		.min_dwell_time_passive       = 100000,
		.max_dwell_time_passive       = 100000,
		.num_probe_reqs               = 2,
	},
	.sched_scan = {
		/* sched_scan requires dwell times in TU instead of TU/1000 */
		.min_dwell_time_active = 30,
		.max_dwell_time_active = 60,
		.dwell_time_passive    = 100,
		.dwell_time_dfs        = 150,
		.num_probe_reqs        = 2,
		.rssi_threshold        = -90,
		.snr_threshold         = 0,
	},
	/* Per-channel TX power compensation (2.4 GHz and 5 GHz) */
	.rf = {
		.tx_per_channel_power_compensation_2 = {
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		},
		.tx_per_channel_power_compensation_5 = {
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
		},
	},
	/* 802.11n block-ack session defaults */
	.ht = {
		.rx_ba_win_size = 8,
		.tx_ba_win_size = 64,
		.inactivity_timeout = 10000,
		.tx_ba_tid_bitmap = CONF_TX_BA_ENABLED_TID_BITMAP,
	},
	/* Firmware memory layout for wl127x chips */
	.mem_wl127x = {
		.num_stations = 1,
		.ssid_profiles = 1,
		.rx_block_num = 70,
		.tx_min_block_num = 40,
		.dynamic_memory = 1,
		.min_req_tx_blocks = 100,
		.min_req_rx_blocks = 22,
		.tx_min = 27,
	},
	/* Firmware memory layout for wl128x chips */
	.mem_wl128x = {
		.num_stations = 1,
		.ssid_profiles = 1,
		.rx_block_num = 40,
		.tx_min_block_num = 40,
		.dynamic_memory = 1,
		.min_req_tx_blocks = 45,
		.min_req_rx_blocks = 22,
		.tx_min = 27,
	},
	/* FM radio / WLAN coexistence */
	.fm_coex = {
		.enable = true,
		.swallow_period = 5,
		.n_divider_fref_set_1 = 0xff,       /* default */
		.n_divider_fref_set_2 = 12,
		.m_divider_fref_set_1 = 148,
		.m_divider_fref_set_2 = 0xffff,     /* default */
		.coex_pll_stabilization_time = 0xffffffff,  /* default */
		.ldo_stabilization_time = 0xffff,   /* default */
		.fm_disturbed_band_margin = 0xff,   /* default */
		.swallow_clk_diff = 0xff,           /* default */
	},
	/* RX streaming (interrupt coalescing) defaults */
	.rx_streaming = {
		.duration                      = 150,
		.queues                        = 0x1,
		.interval                      = 20,
		.always                        = 0,
	},
	/* Firmware logger defaults (see also the fwlog_param module param) */
	.fwlog = {
		.mode                         = WL12XX_FWLOG_ON_DEMAND,
		.mem_blocks                   = 2,
		.severity                     = 0,
		.timestamp                    = WL12XX_FWLOG_TIMESTAMP_DISABLED,
		.output                       = WL12XX_FWLOG_OUTPUT_HOST,
		.threshold                    = 0,
	},
	.hci_io_ds = HCI_IO_DS_6MA,
	/* Rate adaptation policy */
	.rate = {
		.rate_retry_score = 32000,
		.per_add = 8192,
		.per_th1 = 2048,
		.per_th2 = 4096,
		.max_per = 8100,
		.inverse_curiosity_factor = 5,
		.tx_fail_low_th = 4,
		.tx_fail_high_th = 10,
		.per_alpha_shift = 4,
		.per_add_shift = 13,
		.per_beta1_shift = 10,
		.per_beta2_shift = 8,
		.rate_check_up = 2,
		.rate_check_down = 12,
		.rate_retry_policy = {
			0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00, 0x00, 0x00,
			0x00, 0x00, 0x00,
		},
	},
	/* PS "hangover" (delayed sleep after traffic) tuning */
	.hangover = {
		.recover_time               = 0,
		.hangover_period            = 20,
		.dynamic_mode               = 1,
		.early_termination_mode     = 1,
		.max_period                 = 20,
		.min_period                 = 1,
		.increase_delta             = 1,
		.decrease_delta             = 2,
		.quiet_time                 = 4,
		.increase_time              = 1,
		.window_size                = 16,
	},
};
| |
| static char *fwlog_param; |
| static bool bug_on_recovery; |
| |
| static void __wl1271_op_remove_interface(struct wl1271 *wl, |
| bool reset_tx_queues); |
| static void wl1271_free_ap_keys(struct wl1271 *wl); |
| |
| |
/*
 * Empty release callback for the wl1271 platform device below: the device
 * is statically allocated, so there is nothing to free, but the driver
 * core requires a release function to be present.
 */
static void wl1271_device_release(struct device *dev)
{

}
| |
/* Statically-registered platform device representing the wl1271 chip. */
static struct platform_device wl1271_device = {
	.name = "wl1271",
	.id = -1,		/* only one instance, no numbered suffix */

	/* device model insists to have a release function */
	.dev = {
		.release = wl1271_device_release,
	},
};
| |
| static DEFINE_MUTEX(wl_list_mutex); |
| static LIST_HEAD(wl_list); |
| |
/*
 * Once the net_device becomes operationally up, tell the firmware that the
 * station peer is connected and cancel the connection-time ROC.  The
 * STA_STATE_SENT flag guarantees this is done only once per association.
 * Returns 0 when nothing needed doing, or a negative error code from the
 * peer-state command.
 */
static int wl1271_check_operstate(struct wl1271 *wl, unsigned char operstate)
{
	int ret;
	if (operstate != IF_OPER_UP)
		return 0;

	/* already sent for this association — nothing to do */
	if (test_and_set_bit(WL1271_FLAG_STA_STATE_SENT, &wl->flags))
		return 0;

	ret = wl12xx_cmd_set_peer_state(wl, wl->sta_hlid);
	if (ret < 0)
		return ret;

	/* cancel the remain-on-channel that was used while connecting */
	wl12xx_croc(wl, wl->role_id);

	wl1271_info("Association completed.");
	return 0;
}
/*
 * Netdevice notifier callback.  On NETDEV_CHANGE events for one of our own
 * interfaces, propagate the operstate to the firmware via
 * wl1271_check_operstate().  All other notifications are ignored.
 */
static int wl1271_dev_notify(struct notifier_block *me, unsigned long what,
			     void *arg)
{
	struct net_device *dev = arg;
	struct wireless_dev *wdev;
	struct wiphy *wiphy;
	struct ieee80211_hw *hw;
	struct wl1271 *wl;
	struct wl1271 *wl_temp;
	int ret = 0;

	/* Check that this notification is for us. */
	if (what != NETDEV_CHANGE)
		return NOTIFY_DONE;

	wdev = dev->ieee80211_ptr;
	if (wdev == NULL)
		return NOTIFY_DONE;

	wiphy = wdev->wiphy;
	if (wiphy == NULL)
		return NOTIFY_DONE;

	hw = wiphy_priv(wiphy);
	if (hw == NULL)
		return NOTIFY_DONE;

	/* verify that the device really belongs to this driver by looking
	 * for its wl1271 instance in our global device list */
	wl_temp = hw->priv;
	mutex_lock(&wl_list_mutex);
	list_for_each_entry(wl, &wl_list, list) {
		if (wl == wl_temp)
			break;
	}
	mutex_unlock(&wl_list_mutex);
	if (wl != wl_temp)
		return NOTIFY_DONE;

	mutex_lock(&wl->mutex);

	if (wl->state == WL1271_STATE_OFF)
		goto out;

	/* operstate changes only matter while associated */
	if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
		goto out;

	/* wake the chip from ELP before talking to the firmware */
	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	wl1271_check_operstate(wl, dev->operstate);

	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);

	return NOTIFY_OK;
}
| |
| static int wl1271_reg_notify(struct wiphy *wiphy, |
| struct regulatory_request *request) |
| { |
| struct ieee80211_supported_band *band; |
| struct ieee80211_channel *ch; |
| int i; |
| |
| band = wiphy->bands[IEEE80211_BAND_5GHZ]; |
| for (i = 0; i < band->n_channels; i++) { |
| ch = &band->channels[i]; |
| if (ch->flags & IEEE80211_CHAN_DISABLED) |
| continue; |
| |
| if (ch->flags & IEEE80211_CHAN_RADAR) |
| ch->flags |= IEEE80211_CHAN_NO_IBSS | |
| IEEE80211_CHAN_PASSIVE_SCAN; |
| |
| } |
| |
| return 0; |
| } |
| |
| static int wl1271_set_rx_streaming(struct wl1271 *wl, bool enable) |
| { |
| int ret = 0; |
| |
| /* we should hold wl->mutex */ |
| ret = wl1271_acx_ps_rx_streaming(wl, enable); |
| if (ret < 0) |
| goto out; |
| |
| if (enable) |
| set_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags); |
| else |
| clear_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags); |
| out: |
| return ret; |
| } |
| |
/*
 * this function is being called when the rx_streaming interval
 * has been changed or rx_streaming should be disabled
 */
int wl1271_recalc_rx_streaming(struct wl1271 *wl)
{
	int ret = 0;
	int period = wl->conf.rx_streaming.interval;

	/* don't reconfigure if rx_streaming is disabled */
	if (!test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags))
		goto out;

	/* reconfigure/disable according to new streaming_period */
	if (period &&
	    test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags) &&
	    (wl->conf.rx_streaming.always ||
	     test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
		ret = wl1271_set_rx_streaming(wl, true);
	else {
		ret = wl1271_set_rx_streaming(wl, false);
		/* don't cancel_work_sync since we might deadlock */
		del_timer_sync(&wl->rx_streaming_timer);
	}
out:
	return ret;
}
| |
/*
 * Work item that turns RX streaming on when the preconditions hold
 * (associated, soft-gemini active or "always" configured, non-zero
 * interval) and arms the inactivity timer that will turn it off again.
 */
static void wl1271_rx_streaming_enable_work(struct work_struct *work)
{
	int ret;
	struct wl1271 *wl =
		container_of(work, struct wl1271, rx_streaming_enable_work);

	mutex_lock(&wl->mutex);

	/* bail out if streaming is already on or the preconditions fail */
	if (test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags) ||
	    !test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags) ||
	    (!wl->conf.rx_streaming.always &&
	     !test_bit(WL1271_FLAG_SOFT_GEMINI, &wl->flags)))
		goto out;

	/* interval 0 means rx streaming is disabled by configuration */
	if (!wl->conf.rx_streaming.interval)
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	ret = wl1271_set_rx_streaming(wl, true);
	if (ret < 0)
		goto out_sleep;

	/* stop it after some time of inactivity */
	mod_timer(&wl->rx_streaming_timer,
		  jiffies + msecs_to_jiffies(wl->conf.rx_streaming.duration));

out_sleep:
	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);
}
| |
| static void wl1271_rx_streaming_disable_work(struct work_struct *work) |
| { |
| int ret; |
| struct wl1271 *wl = |
| container_of(work, struct wl1271, rx_streaming_disable_work); |
| |
| mutex_lock(&wl->mutex); |
| |
| if (!test_bit(WL1271_FLAG_RX_STREAMING_STARTED, &wl->flags)) |
| goto out; |
| |
| ret = wl1271_ps_elp_wakeup(wl); |
| if (ret < 0) |
| goto out; |
| |
| ret = wl1271_set_rx_streaming(wl, false); |
| if (ret) |
| goto out_sleep; |
| |
| out_sleep: |
| wl1271_ps_elp_sleep(wl); |
| out: |
| mutex_unlock(&wl->mutex); |
| } |
| |
/*
 * RX streaming inactivity timer callback: runs in timer (atomic) context,
 * so defer the actual disabling to the workqueue.
 */
static void wl1271_rx_streaming_timer(unsigned long data)
{
	struct wl1271 *wl = (struct wl1271 *)data;
	ieee80211_queue_work(wl->hw, &wl->rx_streaming_disable_work);
}
| |
| static void wl1271_conf_init(struct wl1271 *wl) |
| { |
| |
| /* |
| * This function applies the default configuration to the driver. This |
| * function is invoked upon driver load (spi probe.) |
| * |
| * The configuration is stored in a run-time structure in order to |
| * facilitate for run-time adjustment of any of the parameters. Making |
| * changes to the configuration structure will apply the new values on |
| * the next interface up (wl1271_op_start.) |
| */ |
| |
| /* apply driver default configuration */ |
| memcpy(&wl->conf, &default_conf, sizeof(default_conf)); |
| |
| /* Adjust settings according to optional module parameters */ |
| if (fwlog_param) { |
| if (!strcmp(fwlog_param, "continuous")) { |
| wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS; |
| } else if (!strcmp(fwlog_param, "ondemand")) { |
| wl->conf.fwlog.mode = WL12XX_FWLOG_ON_DEMAND; |
| } else if (!strcmp(fwlog_param, "dbgpins")) { |
| wl->conf.fwlog.mode = WL12XX_FWLOG_CONTINUOUS; |
| wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_DBG_PINS; |
| } else if (!strcmp(fwlog_param, "disable")) { |
| wl->conf.fwlog.mem_blocks = 0; |
| wl->conf.fwlog.output = WL12XX_FWLOG_OUTPUT_NONE; |
| } else { |
| wl1271_error("Unknown fwlog parameter %s", fwlog_param); |
| } |
| } |
| } |
| |
| static int wl1271_plt_init(struct wl1271 *wl) |
| { |
| struct conf_tx_ac_category *conf_ac; |
| struct conf_tx_tid *conf_tid; |
| int ret, i; |
| |
| if (wl->chip.id == CHIP_ID_1283_PG20) |
| ret = wl128x_cmd_general_parms(wl); |
| else |
| ret = wl1271_cmd_general_parms(wl); |
| if (ret < 0) |
| return ret; |
| |
| if (wl->chip.id == CHIP_ID_1283_PG20) |
| ret = wl128x_cmd_radio_parms(wl); |
| else |
| ret = wl1271_cmd_radio_parms(wl); |
| if (ret < 0) |
| return ret; |
| |
| if (wl->chip.id != CHIP_ID_1283_PG20) { |
| ret = wl1271_cmd_ext_radio_parms(wl); |
| if (ret < 0) |
| return ret; |
| } |
| if (ret < 0) |
| return ret; |
| |
| /* Chip-specific initializations */ |
| ret = wl1271_chip_specific_init(wl); |
| if (ret < 0) |
| return ret; |
| |
| ret = wl1271_sta_init_templates_config(wl); |
| if (ret < 0) |
| return ret; |
| |
| ret = wl1271_acx_init_mem_config(wl); |
| if (ret < 0) |
| return ret; |
| |
| /* PHY layer config */ |
| ret = wl1271_init_phy_config(wl); |
| if (ret < 0) |
| goto out_free_memmap; |
| |
| ret = wl1271_acx_dco_itrim_params(wl); |
| if (ret < 0) |
| goto out_free_memmap; |
| |
| /* Initialize connection monitoring thresholds */ |
| ret = wl1271_acx_conn_monit_params(wl, false); |
| if (ret < 0) |
| goto out_free_memmap; |
| |
| /* Bluetooth WLAN coexistence */ |
| ret = wl1271_init_pta(wl); |
| if (ret < 0) |
| goto out_free_memmap; |
| |
| /* FM WLAN coexistence */ |
| ret = wl1271_acx_fm_coex(wl); |
| if (ret < 0) |
| goto out_free_memmap; |
| |
| /* Energy detection */ |
| ret = wl1271_init_energy_detection(wl); |
| if (ret < 0) |
| goto out_free_memmap; |
| |
| ret = wl12xx_acx_mem_cfg(wl); |
| if (ret < 0) |
| goto out_free_memmap; |
| |
| /* Default fragmentation threshold */ |
| ret = wl1271_acx_frag_threshold(wl, wl->conf.tx.frag_threshold); |
| if (ret < 0) |
| goto out_free_memmap; |
| |
| /* Default TID/AC configuration */ |
| BUG_ON(wl->conf.tx.tid_conf_count != wl->conf.tx.ac_conf_count); |
| for (i = 0; i < wl->conf.tx.tid_conf_count; i++) { |
| conf_ac = &wl->conf.tx.ac_conf[i]; |
| ret = wl1271_acx_ac_cfg(wl, conf_ac->ac, conf_ac->cw_min, |
| conf_ac->cw_max, conf_ac->aifsn, |
| conf_ac->tx_op_limit); |
| if (ret < 0) |
| goto out_free_memmap; |
| |
| conf_tid = &wl->conf.tx.tid_conf[i]; |
| ret = wl1271_acx_tid_cfg(wl, conf_tid->queue_id, |
| conf_tid->channel_type, |
| conf_tid->tsid, |
| conf_tid->ps_scheme, |
| conf_tid->ack_policy, |
| conf_tid->apsd_conf[0], |
| conf_tid->apsd_conf[1]); |
| if (ret < 0) |
| goto out_free_memmap; |
| } |
| |
| /* Enable data path */ |
| ret = wl1271_cmd_data_path(wl, 1); |
| if (ret < 0) |
| goto out_free_memmap; |
| |
| /* Configure for CAM power saving (ie. always active) */ |
| ret = wl1271_acx_sleep_auth(wl, WL1271_PSM_CAM); |
| if (ret < 0) |
| goto out_free_memmap; |
| |
| /* configure PM */ |
| ret = wl1271_acx_pm_config(wl); |
| if (ret < 0) |
| goto out_free_memmap; |
| |
| return 0; |
| |
| out_free_memmap: |
| kfree(wl->target_mem_map); |
| wl->target_mem_map = NULL; |
| |
| return ret; |
| } |
| |
/*
 * In AP mode, regulate host-level power save for one station link based on
 * the firmware's PS bitmap and the number of its packets still queued in
 * firmware.
 */
static void wl12xx_irq_ps_regulate_link(struct wl1271 *wl, u8 hlid, u8 tx_pkts)
{
	bool fw_ps, single_sta;

	/* only regulate station links */
	if (hlid < WL1271_AP_STA_HLID_START)
		return;

	fw_ps = test_bit(hlid, (unsigned long *)&wl->ap_fw_ps_map);
	single_sta = (wl->active_sta_count == 1);

	/*
	 * Wake up from high level PS if the STA is asleep with too little
	 * packets in FW or if the STA is awake.
	 */
	if (!fw_ps || tx_pkts < WL1271_PS_STA_MAX_PACKETS)
		wl1271_ps_link_end(wl, hlid);

	/*
	 * Start high-level PS if the STA is asleep with enough blocks in FW.
	 * Make an exception if this is the only connected station. In this
	 * case FW-memory congestion is not a problem.
	 */
	else if (!single_sta && fw_ps && tx_pkts >= WL1271_PS_STA_MAX_PACKETS)
		wl1271_ps_link_start(wl, hlid, true);
}
| |
| bool wl1271_is_active_sta(struct wl1271 *wl, u8 hlid) |
| { |
| int id; |
| |
| /* global/broadcast "stations" are always active */ |
| if (hlid < WL1271_AP_STA_HLID_START) |
| return true; |
| |
| id = hlid - WL1271_AP_STA_HLID_START; |
| return test_bit(id, wl->ap_hlid_map); |
| } |
| |
/*
 * AP mode: refresh each active link's allocated-packet count from the
 * firmware status and regulate its power-save state accordingly.
 */
static void wl12xx_irq_update_links_status(struct wl1271 *wl,
					   struct wl12xx_fw_status *status)
{
	u32 cur_fw_ps_map;
	u8 hlid, cnt;

	/* TODO: also use link_fast_bitmap here */

	cur_fw_ps_map = le32_to_cpu(status->link_ps_bitmap);
	if (wl->ap_fw_ps_map != cur_fw_ps_map) {
		wl1271_debug(DEBUG_PSM,
			     "link ps prev 0x%x cur 0x%x changed 0x%x",
			     wl->ap_fw_ps_map, cur_fw_ps_map,
			     wl->ap_fw_ps_map ^ cur_fw_ps_map);

		wl->ap_fw_ps_map = cur_fw_ps_map;
	}

	for (hlid = WL1271_AP_STA_HLID_START; hlid < AP_MAX_LINKS; hlid++) {
		if (!wl1271_is_active_sta(wl, hlid))
			continue;

		/*
		 * u8 subtraction naturally handles wrap-around of the
		 * firmware's free-packets counter
		 */
		cnt = status->tx_lnk_free_pkts[hlid] -
		      wl->links[hlid].prev_freed_pkts;

		wl->links[hlid].prev_freed_pkts =
			status->tx_lnk_free_pkts[hlid];
		wl->links[hlid].allocated_pkts -= cnt;

		wl12xx_irq_ps_regulate_link(wl, hlid,
					    wl->links[hlid].allocated_pkts);
	}
}
| |
/*
 * Read the firmware status block from the chip and update the driver's
 * TX accounting: freed packets per queue, freed/available TX memory
 * blocks, per-link AP status, and the host-chipset time offset.
 */
static void wl12xx_fw_status(struct wl1271 *wl,
			     struct wl12xx_fw_status *status)
{
	struct timespec ts;
	u32 old_tx_blk_count = wl->tx_blocks_available;
	int avail, freed_blocks;
	int i;

	wl1271_raw_read(wl, FW_STATUS_ADDR, status, sizeof(*status), false);

	wl1271_debug(DEBUG_IRQ, "intr: 0x%x (fw_rx_counter = %d, "
		     "drv_rx_counter = %d, tx_results_counter = %d)",
		     status->intr,
		     status->fw_rx_counter,
		     status->drv_rx_counter,
		     status->tx_results_counter);

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		/* prevent wrap-around in freed-packets counter */
		wl->tx_allocated_pkts[i] -=
				(status->tx_released_pkts[i] -
				wl->tx_pkts_freed[i]) & 0xff;

		wl->tx_pkts_freed[i] = status->tx_released_pkts[i];
	}

	/* prevent wrap-around in total blocks counter */
	if (likely(wl->tx_blocks_freed <=
		   le32_to_cpu(status->total_released_blks)))
		freed_blocks = le32_to_cpu(status->total_released_blks) -
			       wl->tx_blocks_freed;
	else
		freed_blocks = 0x100000000LL - wl->tx_blocks_freed +
			       le32_to_cpu(status->total_released_blks);

	wl->tx_blocks_freed = le32_to_cpu(status->total_released_blks);

	wl->tx_allocated_blocks -= freed_blocks;

	avail = le32_to_cpu(status->tx_total) - wl->tx_allocated_blocks;

	/*
	 * The FW might change the total number of TX memblocks before
	 * we get a notification about blocks being released. Thus, the
	 * available blocks calculation might yield a temporary result
	 * which is lower than the actual available blocks. Keeping in
	 * mind that only blocks that were allocated can be moved from
	 * TX to RX, tx_blocks_available should never decrease here.
	 */
	wl->tx_blocks_available = max((int)wl->tx_blocks_available,
				      avail);

	/* if more blocks are available now, tx work can be scheduled */
	if (wl->tx_blocks_available > old_tx_blk_count)
		clear_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags);

	/* for AP update num of allocated TX blocks per link and ps status */
	if (wl->bss_type == BSS_TYPE_AP_BSS)
		wl12xx_irq_update_links_status(wl, status);

	/* update the host-chipset time offset */
	getnstimeofday(&ts);
	wl->time_offset = (timespec_to_ns(&ts) >> 10) -
		(s64)le32_to_cpu(status->fw_localtime);
}
| |
| static void wl1271_flush_deferred_work(struct wl1271 *wl) |
| { |
| struct sk_buff *skb; |
| |
| /* Pass all received frames to the network stack */ |
| while ((skb = skb_dequeue(&wl->deferred_rx_queue))) |
| ieee80211_rx_ni(wl->hw, skb); |
| |
| /* Return sent skbs to the network stack */ |
| while ((skb = skb_dequeue(&wl->deferred_tx_queue))) |
| ieee80211_tx_status_ni(wl->hw, skb); |
| } |
| |
| static void wl1271_netstack_work(struct work_struct *work) |
| { |
| struct wl1271 *wl = |
| container_of(work, struct wl1271, netstack_work); |
| |
| do { |
| wl1271_flush_deferred_work(wl); |
| } while (skb_queue_len(&wl->deferred_rx_queue)); |
| } |
| |
| #define WL1271_IRQ_MAX_LOOPS 256 |
| |
/*
 * Threaded interrupt handler.  Loops (up to WL1271_IRQ_MAX_LOOPS, or once
 * for edge-triggered platforms) reading the firmware status and servicing
 * RX data, TX completions and firmware events until no interrupt bits
 * remain set.  A watchdog interrupt aborts the loop and queues recovery.
 */
irqreturn_t wl1271_irq(int irq, void *cookie)
{
	int ret;
	u32 intr;
	int loopcount = WL1271_IRQ_MAX_LOOPS;
	struct wl1271 *wl = (struct wl1271 *)cookie;
	bool done = false;
	unsigned int defer_count;
	unsigned long flags;

	/* TX might be handled here, avoid redundant work */
	set_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
	cancel_work_sync(&wl->tx_work);

	/*
	 * In case edge triggered interrupt must be used, we cannot iterate
	 * more than once without introducing race conditions with the hardirq.
	 */
	if (wl->platform_quirks & WL12XX_PLATFORM_QUIRK_EDGE_IRQ)
		loopcount = 1;

	mutex_lock(&wl->mutex);

	wl1271_debug(DEBUG_IRQ, "IRQ work");

	if (unlikely(wl->state == WL1271_STATE_OFF))
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	while (!done && loopcount--) {
		/*
		 * In order to avoid a race with the hardirq, clear the flag
		 * before acknowledging the chip. Since the mutex is held,
		 * wl1271_ps_elp_wakeup cannot be called concurrently.
		 */
		clear_bit(WL1271_FLAG_IRQ_RUNNING, &wl->flags);
		smp_mb__after_clear_bit();

		wl12xx_fw_status(wl, wl->fw_status);
		intr = le32_to_cpu(wl->fw_status->intr);
		intr &= WL1271_INTR_MASK;
		if (!intr) {
			/* no pending interrupt bits left — we are done */
			done = true;
			continue;
		}

		if (unlikely(intr & WL1271_ACX_INTR_WATCHDOG)) {
			wl1271_error("watchdog interrupt received! "
				     "starting recovery.");
			wl12xx_queue_recovery_work(wl);

			/* restarting the chip. ignore any other interrupt. */
			goto out;
		}

		if (likely(intr & WL1271_ACX_INTR_DATA)) {
			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_DATA");

			wl12xx_rx(wl, wl->fw_status);

			/* Check if any tx blocks were freed */
			spin_lock_irqsave(&wl->wl_lock, flags);
			if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
			    wl1271_tx_total_queue_count(wl) > 0) {
				spin_unlock_irqrestore(&wl->wl_lock, flags);
				/*
				 * In order to avoid starvation of the TX path,
				 * call the work function directly.
				 */
				wl1271_tx_work_locked(wl);
			} else {
				spin_unlock_irqrestore(&wl->wl_lock, flags);
			}

			/* check for tx results */
			if (wl->fw_status->tx_results_counter !=
			    (wl->tx_results_count & 0xff))
				wl1271_tx_complete(wl);

			/* Make sure the deferred queues don't get too long */
			defer_count = skb_queue_len(&wl->deferred_tx_queue) +
				      skb_queue_len(&wl->deferred_rx_queue);
			if (defer_count > WL1271_DEFERRED_QUEUE_LIMIT)
				wl1271_flush_deferred_work(wl);
		}

		if (intr & WL1271_ACX_INTR_EVENT_A) {
			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_A");
			wl1271_event_handle(wl, 0);
		}

		if (intr & WL1271_ACX_INTR_EVENT_B) {
			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_EVENT_B");
			wl1271_event_handle(wl, 1);
		}

		if (intr & WL1271_ACX_INTR_INIT_COMPLETE)
			wl1271_debug(DEBUG_IRQ,
				     "WL1271_ACX_INTR_INIT_COMPLETE");

		if (intr & WL1271_ACX_INTR_HW_AVAILABLE)
			wl1271_debug(DEBUG_IRQ, "WL1271_ACX_INTR_HW_AVAILABLE");
	}

	wl1271_ps_elp_sleep(wl);

out:
	spin_lock_irqsave(&wl->wl_lock, flags);
	/* In case TX was not handled here, queue TX work */
	clear_bit(WL1271_FLAG_TX_PENDING, &wl->flags);
	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
	    wl1271_tx_total_queue_count(wl) > 0)
		ieee80211_queue_work(wl->hw, &wl->tx_work);
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	mutex_unlock(&wl->mutex);

	return IRQ_HANDLED;
}
EXPORT_SYMBOL_GPL(wl1271_irq);
| EXPORT_SYMBOL_GPL(wl1271_irq); |
| |
| static int wl1271_fetch_firmware(struct wl1271 *wl) |
| { |
| const struct firmware *fw; |
| const char *fw_name; |
| int ret; |
| |
| if (wl->chip.id == CHIP_ID_1283_PG20) |
| fw_name = WL128X_FW_NAME; |
| else |
| fw_name = WL127X_FW_NAME; |
| |
| wl1271_debug(DEBUG_BOOT, "booting firmware %s", fw_name); |
| |
| ret = request_firmware(&fw, fw_name, wl1271_wl_to_dev(wl)); |
| |
| if (ret < 0) { |
| wl1271_error("could not get firmware: %d", ret); |
| return ret; |
| } |
| |
| if (fw->size % 4) { |
| wl1271_error("firmware size is not multiple of 32 bits: %zu", |
| fw->size); |
| ret = -EILSEQ; |
| goto out; |
| } |
| |
| vfree(wl->fw); |
| wl->fw_len = fw->size; |
| wl->fw = vmalloc(wl->fw_len); |
| |
| if (!wl->fw) { |
| wl1271_error("could not allocate memory for the firmware"); |
| ret = -ENOMEM; |
| goto out; |
| } |
| |
| memcpy(wl->fw, fw->data, wl->fw_len); |
| ret = 0; |
| |
| out: |
| release_firmware(fw); |
| |
| return ret; |
| } |
| |
| static int wl1271_fetch_nvs(struct wl1271 *wl) |
| { |
| const struct firmware *fw; |
| int ret; |
| |
| ret = request_firmware(&fw, WL12XX_NVS_NAME, wl1271_wl_to_dev(wl)); |
| |
| if (ret < 0) { |
| wl1271_error("could not get nvs file: %d", ret); |
| return ret; |
| } |
| |
| wl->nvs = kmemdup(fw->data, fw->size, GFP_KERNEL); |
| |
| if (!wl->nvs) { |
| wl1271_error("could not allocate memory for the nvs file"); |
| ret = -ENOMEM; |
| goto out; |
| } |
| |
| wl->nvs_len = fw->size; |
| |
| out: |
| release_firmware(fw); |
| |
| return ret; |
| } |
| |
/*
 * Schedule hardware recovery unless a recovery is already in progress
 * (the flag is set by wl1271_recovery_work itself to avoid recursion).
 */
void wl12xx_queue_recovery_work(struct wl1271 *wl)
{
	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags))
		ieee80211_queue_work(wl->hw, &wl->recovery_work);
}
| |
/*
 * Append one memory block's worth of firmware log entries to the host-side
 * log buffer (wl->fwlog, read via sysfs).  The block is a list of
 * length-prefixed records, terminated by a zero length byte or the end of
 * the block.  Returns the number of bytes copied (0 means the block held
 * no valid entries or the host buffer is full).
 */
size_t wl12xx_copy_fwlog(struct wl1271 *wl, u8 *memblock, size_t maxlen)
{
	size_t len = 0;

	/* The FW log is a length-value list, find where the log end */
	while (len < maxlen) {
		/* zero length byte terminates the list */
		if (memblock[len] == 0)
			break;
		/* stop rather than read a record that overruns the block */
		if (len + memblock[len] + 1 > maxlen)
			break;
		len += memblock[len] + 1;
	}

	/* Make sure we have enough room */
	len = min(len, (size_t)(PAGE_SIZE - wl->fwlog_size));

	/* Fill the FW log file, consumed by the sysfs fwlog entry */
	memcpy(wl->fwlog + wl->fwlog_size, memblock, len);
	wl->fwlog_size += len;

	return len;
}
| |
/*
 * On a firmware panic, pull the FW log out of chip memory into wl->fwlog
 * (exposed via sysfs) and wake any reader blocked on fwlog_waitq.
 *
 * Does nothing when the fwlog feature is quirked off, not configured for
 * on-demand mode, or no memory blocks were reserved for logging.
 */
static void wl12xx_read_fwlog_panic(struct wl1271 *wl)
{
	u32 addr;
	u32 first_addr;
	u8 *block;

	if ((wl->quirks & WL12XX_QUIRK_FWLOG_NOT_IMPLEMENTED) ||
	    (wl->conf.fwlog.mode != WL12XX_FWLOG_ON_DEMAND) ||
	    (wl->conf.fwlog.mem_blocks == 0))
		return;

	wl1271_info("Reading FW panic log");

	block = kmalloc(WL12XX_HW_BLOCK_SIZE, GFP_KERNEL);
	if (!block)
		return;

	/*
	 * Make sure the chip is awake and the logger isn't active.
	 * This might fail if the firmware hanged.
	 */
	if (!wl1271_ps_elp_wakeup(wl))
		wl12xx_cmd_stop_fwlog(wl);

	/* Read the first memory block address */
	wl12xx_fw_status(wl, wl->fw_status);
	first_addr = le32_to_cpu(wl->fw_status->log_start_addr);
	if (!first_addr)
		goto out;

	/* Traverse the memory blocks linked list */
	addr = first_addr;
	do {
		memset(block, 0, WL12XX_HW_BLOCK_SIZE);
		wl1271_read_hwaddr(wl, addr, block, WL12XX_HW_BLOCK_SIZE,
				   false);

		/*
		 * Memory blocks are linked to one another. The first 4 bytes
		 * of each memory block hold the hardware address of the next
		 * one. The last memory block points to the first one.
		 */
		addr = le32_to_cpup((__le32 *)block);
		/* A zero return means the sysfs log page is full - stop */
		if (!wl12xx_copy_fwlog(wl, block + sizeof(addr),
				       WL12XX_HW_BLOCK_SIZE - sizeof(addr)))
			break;
	} while (addr && (addr != first_addr));

	/* Let any blocked sysfs reader pick up the new log data */
	wake_up_interruptible(&wl->fwlog_waitq);

out:
	kfree(block);
}
| |
/*
 * Worker that recovers the hardware after a firmware failure: dumps the
 * FW panic log, tears down the active interface and asks mac80211 to
 * restart the hardware.
 */
static void wl1271_recovery_work(struct work_struct *work)
{
	struct wl1271 *wl =
		container_of(work, struct wl1271, recovery_work);

	mutex_lock(&wl->mutex);

	if (wl->state != WL1271_STATE_ON)
		goto out;

	/* Avoid a recursive recovery */
	set_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);

	wl12xx_read_fwlog_panic(wl);

	wl1271_info("Hardware recovery in progress. FW ver: %s pc: 0x%x",
		    wl->chip.fw_ver_str, wl1271_read32(wl, SCR_PAD4));

	/* Debug aid: optionally panic instead of recovering */
	BUG_ON(bug_on_recovery);

	/*
	 * Advance security sequence number to overcome potential progress
	 * in the firmware during recovery. This doesn't hurt if the network
	 * is not encrypted.
	 */
	if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags) ||
	    test_bit(WL1271_FLAG_AP_STARTED, &wl->flags))
		wl->tx_security_seq += WL1271_TX_SQN_POST_RECOVERY_PADDING;

	/* Prevent spurious TX during FW restart */
	ieee80211_stop_queues(wl->hw);

	if (wl->sched_scanning) {
		ieee80211_sched_scan_stopped(wl->hw);
		wl->sched_scanning = false;
	}

	/* reboot the chipset */
	__wl1271_op_remove_interface(wl, false);

	clear_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags);

	ieee80211_restart_hw(wl->hw);

	/*
	 * Its safe to enable TX now - the queues are stopped after a request
	 * to restart the HW.
	 */
	ieee80211_wake_queues(wl->hw);

out:
	mutex_unlock(&wl->mutex);
}
| |
| static void wl1271_fw_wakeup(struct wl1271 *wl) |
| { |
| u32 elp_reg; |
| |
| elp_reg = ELPCTRL_WAKE_UP; |
| wl1271_raw_write32(wl, HW_ACCESS_ELP_CTRL_REG_ADDR, elp_reg); |
| } |
| |
| static int wl1271_setup(struct wl1271 *wl) |
| { |
| wl->fw_status = kmalloc(sizeof(*wl->fw_status), GFP_KERNEL); |
| if (!wl->fw_status) |
| return -ENOMEM; |
| |
| wl->tx_res_if = kmalloc(sizeof(*wl->tx_res_if), GFP_KERNEL); |
| if (!wl->tx_res_if) { |
| kfree(wl->fw_status); |
| return -ENOMEM; |
| } |
| |
| return 0; |
| } |
| |
/*
 * Power on and identify the chip, then make sure the firmware and NVS
 * images are available (fetching them from the filesystem when they are
 * not cached already).
 *
 * Returns 0 on success, -ENODEV for an unsupported chip id, or another
 * negative error from power-on / setup / image fetching.
 */
static int wl1271_chip_wakeup(struct wl1271 *wl)
{
	struct wl1271_partition_set partition;
	int ret = 0;

	msleep(WL1271_PRE_POWER_ON_SLEEP);
	ret = wl1271_power_on(wl);
	if (ret < 0)
		goto out;
	msleep(WL1271_POWER_ON_SLEEP);
	wl1271_io_reset(wl);
	wl1271_io_init(wl);

	/* We don't need a real memory partition here, because we only want
	 * to use the registers at this point. */
	memset(&partition, 0, sizeof(partition));
	partition.reg.start = REGISTERS_BASE;
	partition.reg.size = REGISTERS_DOWN_SIZE;
	wl1271_set_partition(wl, &partition);

	/* ELP module wake up */
	wl1271_fw_wakeup(wl);

	/* whal_FwCtrl_BootSm() */

	/* 0. read chip id from CHIP_ID */
	wl->chip.id = wl1271_read32(wl, CHIP_ID_B);

	/* 1. check if chip id is valid */

	switch (wl->chip.id) {
	case CHIP_ID_1271_PG10:
		wl1271_warning("chip id 0x%x (1271 PG10) support is obsolete",
			       wl->chip.id);

		ret = wl1271_setup(wl);
		if (ret < 0)
			goto out;
		break;
	case CHIP_ID_1271_PG20:
		wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1271 PG20)",
			     wl->chip.id);

		ret = wl1271_setup(wl);
		if (ret < 0)
			goto out;
		break;
	case CHIP_ID_1283_PG20:
		wl1271_debug(DEBUG_BOOT, "chip id 0x%x (1283 PG20)",
			     wl->chip.id);

		ret = wl1271_setup(wl);
		if (ret < 0)
			goto out;

		/* this chip may need block-size aligned bus transfers */
		if (wl1271_set_block_size(wl))
			wl->quirks |= WL12XX_QUIRK_BLOCKSIZE_ALIGNMENT;
		break;
	case CHIP_ID_1283_PG10:
	default:
		wl1271_warning("unsupported chip id: 0x%x", wl->chip.id);
		ret = -ENODEV;
		goto out;
	}

	/* the firmware image is cached across boots; fetch only once */
	if (wl->fw == NULL) {
		ret = wl1271_fetch_firmware(wl);
		if (ret < 0)
			goto out;
	}

	/* No NVS from netlink, try to get it from the filesystem */
	if (wl->nvs == NULL) {
		ret = wl1271_fetch_nvs(wl);
		if (ret < 0)
			goto out;
	}

out:
	return ret;
}
| |
/*
 * Boot the firmware in PLT (Production Line Testing) mode.
 *
 * The device must be in the OFF state. The full boot sequence is retried
 * up to WL1271_BOOT_RETRIES times, powering the chip off between
 * attempts. Returns 0 on success, -EBUSY when not off, or the last boot
 * error.
 */
int wl1271_plt_start(struct wl1271 *wl)
{
	int retries = WL1271_BOOT_RETRIES;
	struct wiphy *wiphy = wl->hw->wiphy;
	int ret;

	mutex_lock(&wl->mutex);

	wl1271_notice("power up");

	if (wl->state != WL1271_STATE_OFF) {
		wl1271_error("cannot go into PLT state because not "
			     "in off state: %d", wl->state);
		ret = -EBUSY;
		goto out;
	}

	wl->bss_type = BSS_TYPE_STA_BSS;

	while (retries) {
		retries--;
		ret = wl1271_chip_wakeup(wl);
		if (ret < 0)
			goto power_off;

		ret = wl1271_boot(wl);
		if (ret < 0)
			goto power_off;

		ret = wl1271_plt_init(wl);
		if (ret < 0)
			goto irq_disable;

		wl->state = WL1271_STATE_PLT;
		wl1271_notice("firmware booted in PLT mode (%s)",
			      wl->chip.fw_ver_str);

		/* update hw/fw version info in wiphy struct */
		wiphy->hw_version = wl->chip.id;
		strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
			sizeof(wiphy->fw_version));

		goto out;

	irq_disable:
		mutex_unlock(&wl->mutex);
		/* Unlocking the mutex in the middle of handling is
		   inherently unsafe. In this case we deem it safe to do,
		   because we need to let any possibly pending IRQ out of
		   the system (and while we are WL1271_STATE_OFF the IRQ
		   work function will not do anything.) Also, any other
		   possible concurrent operations will fail due to the
		   current state, hence the wl1271 struct should be safe. */
		wl1271_disable_interrupts(wl);
		wl1271_flush_deferred_work(wl);
		cancel_work_sync(&wl->netstack_work);
		mutex_lock(&wl->mutex);
	power_off:
		wl1271_power_off(wl);
	}

	wl1271_error("firmware boot in PLT mode failed despite %d retries",
		     WL1271_BOOT_RETRIES);
out:
	mutex_unlock(&wl->mutex);

	return ret;
}
| |
/*
 * Power down from PLT mode and return the device to the OFF state.
 *
 * Called with wl->mutex held; the mutex is temporarily dropped so that
 * pending interrupt/netstack/recovery work can be flushed and cancelled.
 * Returns -EBUSY when not currently in PLT state.
 */
static int __wl1271_plt_stop(struct wl1271 *wl)
{
	int ret = 0;

	wl1271_notice("power down");

	if (wl->state != WL1271_STATE_PLT) {
		wl1271_error("cannot power down because not in PLT "
			     "state: %d", wl->state);
		ret = -EBUSY;
		goto out;
	}

	wl1271_power_off(wl);

	wl->state = WL1271_STATE_OFF;
	wl->rx_counter = 0;

	/* flush work with the mutex released; OFF state keeps them inert */
	mutex_unlock(&wl->mutex);
	wl1271_disable_interrupts(wl);
	wl1271_flush_deferred_work(wl);
	cancel_work_sync(&wl->netstack_work);
	cancel_work_sync(&wl->recovery_work);
	mutex_lock(&wl->mutex);
out:
	return ret;
}
| |
| int wl1271_plt_stop(struct wl1271 *wl) |
| { |
| int ret; |
| |
| mutex_lock(&wl->mutex); |
| ret = __wl1271_plt_stop(wl); |
| mutex_unlock(&wl->mutex); |
| return ret; |
| } |
| |
/*
 * mac80211 TX entry point: queue the skb on the proper AC queue (the
 * per-link queue in AP mode), apply high-watermark flow control, and
 * schedule the TX work unless the firmware is busy or TX is pending.
 * Frames for inactive stations in AP mode are dropped.
 */
static void wl1271_op_tx(struct ieee80211_hw *hw, struct sk_buff *skb)
{
	struct wl1271 *wl = hw->priv;
	unsigned long flags;
	int q, mapping;
	u8 hlid = 0;

	mapping = skb_get_queue_mapping(skb);
	q = wl1271_tx_get_queue(mapping);

	if (wl->bss_type == BSS_TYPE_AP_BSS)
		hlid = wl12xx_tx_get_hlid_ap(wl, skb);

	spin_lock_irqsave(&wl->wl_lock, flags);

	/* queue the packet */
	if (wl->bss_type == BSS_TYPE_AP_BSS) {
		if (!wl1271_is_active_sta(wl, hlid)) {
			wl1271_debug(DEBUG_TX, "DROP skb hlid %d q %d",
				     hlid, q);
			dev_kfree_skb(skb);
			goto out;
		}

		wl1271_debug(DEBUG_TX, "queue skb hlid %d q %d", hlid, q);
		skb_queue_tail(&wl->links[hlid].tx_queue[q], skb);
	} else {
		skb_queue_tail(&wl->tx_queue[q], skb);
	}

	wl->tx_queue_count[q]++;

	/*
	 * The workqueue is slow to process the tx_queue and we need stop
	 * the queue here, otherwise the queue will get too long.
	 */
	if (wl->tx_queue_count[q] >= WL1271_TX_QUEUE_HIGH_WATERMARK) {
		wl1271_debug(DEBUG_TX, "op_tx: stopping queues for q %d", q);
		ieee80211_stop_queue(wl->hw, mapping);
		set_bit(q, &wl->stopped_queues_map);
	}

	/*
	 * The chip specific setup must run before the first TX packet -
	 * before that, the tx_work will not be initialized!
	 */

	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags) &&
	    !test_bit(WL1271_FLAG_TX_PENDING, &wl->flags))
		ieee80211_queue_work(wl->hw, &wl->tx_work);

out:
	spin_unlock_irqrestore(&wl->wl_lock, flags);
}
| |
/*
 * Request transmission of the preallocated dummy packet (wl->dummy_packet),
 * used when the firmware is low on RX memory blocks. Marks it pending and
 * accounts it in the queue counters; the TX path picks up the actual skb.
 * Always returns 0.
 */
int wl1271_tx_dummy_packet(struct wl1271 *wl)
{
	unsigned long flags;
	int q;

	/* no need to queue a new dummy packet if one is already pending */
	if (test_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags))
		return 0;

	q = wl1271_tx_get_queue(skb_get_queue_mapping(wl->dummy_packet));

	spin_lock_irqsave(&wl->wl_lock, flags);
	set_bit(WL1271_FLAG_DUMMY_PACKET_PENDING, &wl->flags);
	wl->tx_queue_count[q]++;
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	/* The FW is low on RX memory blocks, so send the dummy packet asap */
	if (!test_bit(WL1271_FLAG_FW_TX_BUSY, &wl->flags))
		wl1271_tx_work_locked(wl);

	/*
	 * If the FW TX is busy, TX work will be scheduled by the threaded
	 * interrupt handler function
	 */
	return 0;
}
| |
| /* |
| * The size of the dummy packet should be at least 1400 bytes. However, in |
| * order to minimize the number of bus transactions, aligning it to 512 bytes |
| * boundaries could be beneficial, performance wise |
| */ |
| #define TOTAL_TX_DUMMY_PACKET_SIZE (ALIGN(1400, 512)) |
| |
| static struct sk_buff *wl12xx_alloc_dummy_packet(struct wl1271 *wl) |
| { |
| struct sk_buff *skb; |
| struct ieee80211_hdr_3addr *hdr; |
| unsigned int dummy_packet_size; |
| |
| dummy_packet_size = TOTAL_TX_DUMMY_PACKET_SIZE - |
| sizeof(struct wl1271_tx_hw_descr) - sizeof(*hdr); |
| |
| skb = dev_alloc_skb(TOTAL_TX_DUMMY_PACKET_SIZE); |
| if (!skb) { |
| wl1271_warning("Failed to allocate a dummy packet skb"); |
| return NULL; |
| } |
| |
| skb_reserve(skb, sizeof(struct wl1271_tx_hw_descr)); |
| |
| hdr = (struct ieee80211_hdr_3addr *) skb_put(skb, sizeof(*hdr)); |
| memset(hdr, 0, sizeof(*hdr)); |
| hdr->frame_control = cpu_to_le16(IEEE80211_FTYPE_DATA | |
| IEEE80211_STYPE_NULLFUNC | |
| IEEE80211_FCTL_TODS); |
| |
| memset(skb_put(skb, dummy_packet_size), 0, dummy_packet_size); |
| |
| /* Dummy packets require the TID to be management */ |
| skb->priority = WL1271_TID_MGMT; |
| |
| /* Initialize all fields that might be used */ |
| skb_set_queue_mapping(skb, 0); |
| memset(IEEE80211_SKB_CB(skb), 0, sizeof(struct ieee80211_tx_info)); |
| |
| return skb; |
| } |
| |
| |
/* Netdevice notifier block; events are handled by wl1271_dev_notify(). */
static struct notifier_block wl1271_dev_notifier = {
	.notifier_call = wl1271_dev_notify,
};
| |
| #ifdef CONFIG_PM |
/*
 * Prepare a STA interface for suspend: wake the chip and, if not already
 * in power-save mode, enter PSM and wait (with timeout) for the firmware
 * PS-complete event. The mutex is dropped while waiting so the event can
 * be delivered.
 *
 * Returns 0 when not associated or on success; -EBUSY on PS-enter
 * timeout, or another negative error.
 */
static int wl1271_configure_suspend_sta(struct wl1271 *wl)
{
	int ret = 0;

	mutex_lock(&wl->mutex);

	if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
		goto out_unlock;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out_unlock;

	/* enter psm if needed*/
	if (!test_bit(WL1271_FLAG_PSM, &wl->flags)) {
		DECLARE_COMPLETION_ONSTACK(compl);

		wl->ps_compl = &compl;
		ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE,
					 wl->basic_rate, true);
		if (ret < 0)
			goto out_sleep;

		/* we must unlock here so we will be able to get events */
		wl1271_ps_elp_sleep(wl);
		mutex_unlock(&wl->mutex);

		ret = wait_for_completion_timeout(
			&compl, msecs_to_jiffies(WL1271_PS_COMPLETE_TIMEOUT));
		if (ret <= 0) {
			wl1271_warning("couldn't enter ps mode!");
			ret = -EBUSY;
			goto out;
		}

		/* take mutex again, and wakeup */
		mutex_lock(&wl->mutex);

		ret = wl1271_ps_elp_wakeup(wl);
		if (ret < 0)
			goto out_unlock;
	}
out_sleep:
	wl1271_ps_elp_sleep(wl);
out_unlock:
	mutex_unlock(&wl->mutex);
out:
	return ret;

}
| |
| static int wl1271_configure_suspend_ap(struct wl1271 *wl) |
| { |
| int ret = 0; |
| |
| mutex_lock(&wl->mutex); |
| |
| if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) |
| goto out_unlock; |
| |
| ret = wl1271_ps_elp_wakeup(wl); |
| if (ret < 0) |
| goto out_unlock; |
| |
| ret = wl1271_acx_beacon_filter_opt(wl, true); |
| |
| wl1271_ps_elp_sleep(wl); |
| out_unlock: |
| mutex_unlock(&wl->mutex); |
| return ret; |
| |
| } |
| |
| static int wl1271_configure_suspend(struct wl1271 *wl) |
| { |
| if (wl->bss_type == BSS_TYPE_STA_BSS) |
| return wl1271_configure_suspend_sta(wl); |
| if (wl->bss_type == BSS_TYPE_AP_BSS) |
| return wl1271_configure_suspend_ap(wl); |
| return 0; |
| } |
| |
| static void wl1271_configure_resume(struct wl1271 *wl) |
| { |
| int ret; |
| bool is_sta = wl->bss_type == BSS_TYPE_STA_BSS; |
| bool is_ap = wl->bss_type == BSS_TYPE_AP_BSS; |
| |
| if (!is_sta && !is_ap) |
| return; |
| |
| mutex_lock(&wl->mutex); |
| ret = wl1271_ps_elp_wakeup(wl); |
| if (ret < 0) |
| goto out; |
| |
| if (is_sta) { |
| /* exit psm if it wasn't configured */ |
| if (!test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags)) |
| wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE, |
| wl->basic_rate, true); |
| } else if (is_ap) { |
| wl1271_acx_beacon_filter_opt(wl, false); |
| } |
| |
| wl1271_ps_elp_sleep(wl); |
| out: |
| mutex_unlock(&wl->mutex); |
| } |
| |
/*
 * mac80211 suspend callback (only the WoWLAN "any" trigger is supported).
 * Configures the device for suspend, then flushes all pending work so no
 * new bus activity starts while the host is asleep.
 */
static int wl1271_op_suspend(struct ieee80211_hw *hw,
			    struct cfg80211_wowlan *wow)
{
	struct wl1271 *wl = hw->priv;
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 suspend wow=%d", !!wow);
	WARN_ON(!wow || !wow->any);

	wl->wow_enabled = true;
	ret = wl1271_configure_suspend(wl);
	if (ret < 0) {
		wl1271_warning("couldn't prepare device to suspend");
		return ret;
	}
	/* flush any remaining work */
	wl1271_debug(DEBUG_MAC80211, "flushing remaining works");

	/*
	 * disable and re-enable interrupts in order to flush
	 * the threaded_irq
	 */
	wl1271_disable_interrupts(wl);

	/*
	 * set suspended flag to avoid triggering a new threaded_irq
	 * work. no need for spinlock as interrupts are disabled.
	 */
	set_bit(WL1271_FLAG_SUSPENDED, &wl->flags);

	wl1271_enable_interrupts(wl);
	flush_work(&wl->tx_work);
	flush_delayed_work(&wl->pspoll_work);
	flush_delayed_work(&wl->elp_work);

	return 0;
}
| |
/*
 * mac80211 resume callback: re-enable irq work queueing, run any work
 * that was postponed while suspended, and undo the suspend-time device
 * configuration.
 */
static int wl1271_op_resume(struct ieee80211_hw *hw)
{
	struct wl1271 *wl = hw->priv;
	unsigned long flags;
	bool run_irq_work = false;

	wl1271_debug(DEBUG_MAC80211, "mac80211 resume wow=%d",
		     wl->wow_enabled);
	WARN_ON(!wl->wow_enabled);

	/*
	 * re-enable irq_work enqueuing, and call irq_work directly if
	 * there is a pending work.
	 */
	spin_lock_irqsave(&wl->wl_lock, flags);
	clear_bit(WL1271_FLAG_SUSPENDED, &wl->flags);
	if (test_and_clear_bit(WL1271_FLAG_PENDING_WORK, &wl->flags))
		run_irq_work = true;
	spin_unlock_irqrestore(&wl->wl_lock, flags);

	if (run_irq_work) {
		wl1271_debug(DEBUG_MAC80211,
			     "run postponed irq_work directly");
		wl1271_irq(0, wl);
		wl1271_enable_interrupts(wl);
	}
	wl1271_configure_resume(wl);
	wl->wow_enabled = false;

	return 0;
}
| #endif |
| |
/* mac80211 start callback - intentionally a no-op, see comment below. */
static int wl1271_op_start(struct ieee80211_hw *hw)
{
	wl1271_debug(DEBUG_MAC80211, "mac80211 start");

	/*
	 * We have to delay the booting of the hardware because
	 * we need to know the local MAC address before downloading and
	 * initializing the firmware. The MAC address cannot be changed
	 * after boot, and without the proper MAC address, the firmware
	 * will not function properly.
	 *
	 * The MAC address is first known when the corresponding interface
	 * is added. That is where we will initialize the hardware.
	 */

	return 0;
}
| |
/* mac80211 stop callback - teardown is done on interface removal instead. */
static void wl1271_op_stop(struct ieee80211_hw *hw)
{
	wl1271_debug(DEBUG_MAC80211, "mac80211 stop");
}
| |
| static u8 wl12xx_get_role_type(struct wl1271 *wl) |
| { |
| switch (wl->bss_type) { |
| case BSS_TYPE_AP_BSS: |
| if (wl->p2p) |
| return WL1271_ROLE_P2P_GO; |
| else |
| return WL1271_ROLE_AP; |
| |
| case BSS_TYPE_STA_BSS: |
| if (wl->p2p) |
| return WL1271_ROLE_P2P_CL; |
| else |
| return WL1271_ROLE_STA; |
| |
| case BSS_TYPE_IBSS: |
| return WL1271_ROLE_IBSS; |
| |
| default: |
| wl1271_error("invalid bss_type: %d", wl->bss_type); |
| } |
| return WL12XX_INVALID_ROLE_TYPE; |
| } |
| |
/*
 * mac80211 add_interface: adding the first (and only) vif is what
 * actually boots the chip, because the firmware needs the MAC address
 * which only becomes known here. The boot sequence is retried up to
 * WL1271_BOOT_RETRIES times with a power cycle in between.
 *
 * Returns -EBUSY when a vif already exists, teardown is still in
 * progress, or the device is not off; -EOPNOTSUPP for unsupported
 * interface types.
 */
static int wl1271_op_add_interface(struct ieee80211_hw *hw,
				   struct ieee80211_vif *vif)
{
	struct wl1271 *wl = hw->priv;
	struct wiphy *wiphy = hw->wiphy;
	int retries = WL1271_BOOT_RETRIES;
	int ret = 0;
	u8 role_type;
	bool booted = false;

	wl1271_debug(DEBUG_MAC80211, "mac80211 add interface type %d mac %pM",
		     ieee80211_vif_type_p2p(vif), vif->addr);

	mutex_lock(&wl->mutex);
	if (wl->vif) {
		wl1271_debug(DEBUG_MAC80211,
			     "multiple vifs are not supported yet");
		ret = -EBUSY;
		goto out;
	}

	/*
	 * in some very corner case HW recovery scenarios its possible to
	 * get here before __wl1271_op_remove_interface is complete, so
	 * opt out if that is the case.
	 */
	if (test_bit(WL1271_FLAG_IF_INITIALIZED, &wl->flags)) {
		ret = -EBUSY;
		goto out;
	}

	/* map the mac80211 interface type onto our bss_type/p2p fields */
	switch (ieee80211_vif_type_p2p(vif)) {
	case NL80211_IFTYPE_P2P_CLIENT:
		wl->p2p = 1;
		/* fall-through */
	case NL80211_IFTYPE_STATION:
		wl->bss_type = BSS_TYPE_STA_BSS;
		wl->set_bss_type = BSS_TYPE_STA_BSS;
		break;
	case NL80211_IFTYPE_ADHOC:
		wl->bss_type = BSS_TYPE_IBSS;
		wl->set_bss_type = BSS_TYPE_STA_BSS;
		break;
	case NL80211_IFTYPE_P2P_GO:
		wl->p2p = 1;
		/* fall-through */
	case NL80211_IFTYPE_AP:
		wl->bss_type = BSS_TYPE_AP_BSS;
		break;
	default:
		ret = -EOPNOTSUPP;
		goto out;
	}

	role_type = wl12xx_get_role_type(wl);
	if (role_type == WL12XX_INVALID_ROLE_TYPE) {
		ret = -EINVAL;
		goto out;
	}
	memcpy(wl->mac_addr, vif->addr, ETH_ALEN);

	if (wl->state != WL1271_STATE_OFF) {
		wl1271_error("cannot start because not in off state: %d",
			     wl->state);
		ret = -EBUSY;
		goto out;
	}

	while (retries) {
		retries--;
		ret = wl1271_chip_wakeup(wl);
		if (ret < 0)
			goto power_off;

		ret = wl1271_boot(wl);
		if (ret < 0)
			goto power_off;

		if (wl->bss_type == BSS_TYPE_STA_BSS ||
		    wl->bss_type == BSS_TYPE_IBSS) {
			/*
			 * The device role is a special role used for
			 * rx and tx frames prior to association (as
			 * the STA role can get packets only from
			 * its associated bssid)
			 */
			ret = wl12xx_cmd_role_enable(wl,
							 WL1271_ROLE_DEVICE,
							 &wl->dev_role_id);
			if (ret < 0)
				goto irq_disable;
		}

		ret = wl12xx_cmd_role_enable(wl, role_type, &wl->role_id);
		if (ret < 0)
			goto irq_disable;

		ret = wl1271_hw_init(wl);
		if (ret < 0)
			goto irq_disable;

		booted = true;
		break;

	irq_disable:
		mutex_unlock(&wl->mutex);
		/* Unlocking the mutex in the middle of handling is
		   inherently unsafe. In this case we deem it safe to do,
		   because we need to let any possibly pending IRQ out of
		   the system (and while we are WL1271_STATE_OFF the IRQ
		   work function will not do anything.) Also, any other
		   possible concurrent operations will fail due to the
		   current state, hence the wl1271 struct should be safe. */
		wl1271_disable_interrupts(wl);
		wl1271_flush_deferred_work(wl);
		cancel_work_sync(&wl->netstack_work);
		mutex_lock(&wl->mutex);
	power_off:
		wl1271_power_off(wl);
	}

	if (!booted) {
		wl1271_error("firmware boot failed despite %d retries",
			     WL1271_BOOT_RETRIES);
		goto out;
	}

	wl->vif = vif;
	wl->state = WL1271_STATE_ON;
	set_bit(WL1271_FLAG_IF_INITIALIZED, &wl->flags);
	wl1271_info("firmware booted (%s)", wl->chip.fw_ver_str);

	/* update hw/fw version info in wiphy struct */
	wiphy->hw_version = wl->chip.id;
	strncpy(wiphy->fw_version, wl->chip.fw_ver_str,
		sizeof(wiphy->fw_version));

	/*
	 * Now we know if 11a is supported (info from the NVS), so disable
	 * 11a channels if not supported
	 */
	if (!wl->enable_11a)
		wiphy->bands[IEEE80211_BAND_5GHZ]->n_channels = 0;

	wl1271_debug(DEBUG_MAC80211, "11a is %ssupported",
		     wl->enable_11a ? "" : "not ");

out:
	mutex_unlock(&wl->mutex);

	/* register the device on the global list only on success */
	mutex_lock(&wl_list_mutex);
	if (!ret)
		list_add(&wl->list, &wl_list);
	mutex_unlock(&wl_list_mutex);

	return ret;
}
| |
/*
 * Tear down the active interface and power off the chip, resetting all
 * driver state back to post-probe defaults.
 *
 * Called with wl->mutex held; the mutex is dropped temporarily to flush
 * pending work. When invoked from HW recovery the firmware is assumed
 * dead, so all command/ELP exchanges with it are skipped.
 *
 * @reset_tx_queues: passed through to wl1271_tx_reset(); controls how
 * pending TX frames are reported back to mac80211.
 */
static void __wl1271_op_remove_interface(struct wl1271 *wl,
					 bool reset_tx_queues)
{
	int ret, i;

	wl1271_debug(DEBUG_MAC80211, "mac80211 remove interface");

	/* because of hardware recovery, we may get here twice */
	if (wl->state != WL1271_STATE_ON)
		return;

	wl1271_info("down");

	mutex_lock(&wl_list_mutex);
	list_del(&wl->list);
	mutex_unlock(&wl_list_mutex);

	/* enable dyn ps just in case (if left on due to fw crash etc) */
	if (wl->bss_type == BSS_TYPE_STA_BSS)
		ieee80211_enable_dyn_ps(wl->vif);

	/* report an aborted scan to mac80211 if one was in flight */
	if (wl->scan.state != WL1271_SCAN_STATE_IDLE) {
		wl->scan.state = WL1271_SCAN_STATE_IDLE;
		memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
		wl->scan.req = NULL;
		ieee80211_scan_completed(wl->hw, true);
	}

	/* skip talking to the firmware when it crashed (recovery path) */
	if (!test_bit(WL1271_FLAG_RECOVERY_IN_PROGRESS, &wl->flags)) {
		/* disable active roles */
		ret = wl1271_ps_elp_wakeup(wl);
		if (ret < 0)
			goto deinit;

		if (wl->bss_type == BSS_TYPE_STA_BSS) {
			ret = wl12xx_cmd_role_disable(wl, &wl->dev_role_id);
			if (ret < 0)
				goto deinit;
		}

		ret = wl12xx_cmd_role_disable(wl, &wl->role_id);
		if (ret < 0)
			goto deinit;

		wl1271_ps_elp_sleep(wl);
	}
deinit:
	/* clear all hlids (except system_hlid) */
	wl->sta_hlid = WL12XX_INVALID_LINK_ID;
	wl->dev_hlid = WL12XX_INVALID_LINK_ID;
	wl->ap_bcast_hlid = WL12XX_INVALID_LINK_ID;
	wl->ap_global_hlid = WL12XX_INVALID_LINK_ID;

	/*
	 * this must be before the cancel_work calls below, so that the work
	 * functions don't perform further work.
	 */
	wl->state = WL1271_STATE_OFF;

	mutex_unlock(&wl->mutex);

	wl1271_disable_interrupts(wl);
	wl1271_flush_deferred_work(wl);
	cancel_delayed_work_sync(&wl->scan_complete_work);
	cancel_work_sync(&wl->netstack_work);
	cancel_work_sync(&wl->tx_work);
	del_timer_sync(&wl->rx_streaming_timer);
	cancel_work_sync(&wl->rx_streaming_enable_work);
	cancel_work_sync(&wl->rx_streaming_disable_work);
	cancel_delayed_work_sync(&wl->pspoll_work);
	cancel_delayed_work_sync(&wl->elp_work);

	mutex_lock(&wl->mutex);

	/* let's notify MAC80211 about the remaining pending TX frames */
	wl1271_tx_reset(wl, reset_tx_queues);
	wl1271_power_off(wl);

	/* reset all per-interface driver state to its defaults */
	memset(wl->bssid, 0, ETH_ALEN);
	memset(wl->ssid, 0, IEEE80211_MAX_SSID_LEN + 1);
	wl->ssid_len = 0;
	wl->bss_type = MAX_BSS_TYPE;
	wl->set_bss_type = MAX_BSS_TYPE;
	wl->p2p = 0;
	wl->band = IEEE80211_BAND_2GHZ;

	wl->rx_counter = 0;
	wl->psm_entry_retry = 0;
	wl->power_level = WL1271_DEFAULT_POWER_LEVEL;
	wl->tx_blocks_available = 0;
	wl->tx_allocated_blocks = 0;
	wl->tx_results_count = 0;
	wl->tx_packets_count = 0;
	wl->time_offset = 0;
	wl->session_counter = 0;
	wl->rate_set = CONF_TX_RATE_MASK_BASIC;
	wl->bitrate_masks[IEEE80211_BAND_2GHZ] = wl->conf.tx.basic_rate;
	wl->bitrate_masks[IEEE80211_BAND_5GHZ] = wl->conf.tx.basic_rate_5;
	wl->vif = NULL;
	wl->tx_spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT;
	wl1271_free_ap_keys(wl);
	memset(wl->ap_hlid_map, 0, sizeof(wl->ap_hlid_map));
	wl->ap_fw_ps_map = 0;
	wl->ap_ps_map = 0;
	wl->sched_scanning = false;
	wl->role_id = WL12XX_INVALID_ROLE_ID;
	wl->dev_role_id = WL12XX_INVALID_ROLE_ID;
	memset(wl->roles_map, 0, sizeof(wl->roles_map));
	memset(wl->links_map, 0, sizeof(wl->links_map));
	memset(wl->roc_map, 0, sizeof(wl->roc_map));
	wl->active_sta_count = 0;

	/* The system link is always allocated */
	__set_bit(WL12XX_SYSTEM_HLID, wl->links_map);

	/*
	 * this is performed after the cancel_work calls and the associated
	 * mutex_lock, so that wl1271_op_add_interface does not accidentally
	 * get executed before all these vars have been reset.
	 */
	wl->flags = 0;

	wl->tx_blocks_freed = 0;

	for (i = 0; i < NUM_TX_QUEUES; i++) {
		wl->tx_pkts_freed[i] = 0;
		wl->tx_allocated_pkts[i] = 0;
	}

	wl1271_debugfs_reset(wl);

	/* free per-boot buffers allocated by wl1271_setup() and boot */
	kfree(wl->fw_status);
	wl->fw_status = NULL;
	kfree(wl->tx_res_if);
	wl->tx_res_if = NULL;
	kfree(wl->target_mem_map);
	wl->target_mem_map = NULL;
}
| |
| static void wl1271_op_remove_interface(struct ieee80211_hw *hw, |
| struct ieee80211_vif *vif) |
| { |
| struct wl1271 *wl = hw->priv; |
| |
| mutex_lock(&wl->mutex); |
| /* |
| * wl->vif can be null here if someone shuts down the interface |
| * just when hardware recovery has been started. |
| */ |
| if (wl->vif) { |
| WARN_ON(wl->vif != vif); |
| __wl1271_op_remove_interface(wl, true); |
| } |
| |
| mutex_unlock(&wl->mutex); |
| cancel_work_sync(&wl->recovery_work); |
| } |
| |
/*
 * Issue a JOIN (start the IBSS or STA role) and, when associated, re-arm
 * the keep-alive machinery that the JOIN command implicitly disables.
 *
 * @set_assoc: mark the interface as associated before joining.
 */
static int wl1271_join(struct wl1271 *wl, bool set_assoc)
{
	int ret;
	bool is_ibss = (wl->bss_type == BSS_TYPE_IBSS);

	/*
	 * One of the side effects of the JOIN command is that is clears
	 * WPA/WPA2 keys from the chipset. Performing a JOIN while associated
	 * to a WPA/WPA2 access point will therefore kill the data-path.
	 * Currently the only valid scenario for JOIN during association
	 * is on roaming, in which case we will also be given new keys.
	 * Keep the below message for now, unless it starts bothering
	 * users who really like to roam a lot :)
	 */
	if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
		wl1271_info("JOIN while associated.");

	if (set_assoc)
		set_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags);

	if (is_ibss)
		ret = wl12xx_cmd_role_start_ibss(wl);
	else
		ret = wl12xx_cmd_role_start_sta(wl);
	if (ret < 0)
		goto out;

	/* the keep-alive re-arming below only applies when associated */
	if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
		goto out;

	/*
	 * The join command disable the keep-alive mode, shut down its process,
	 * and also clear the template config, so we need to reset it all after
	 * the join. The acx_aid starts the keep-alive process, and the order
	 * of the commands below is relevant.
	 */
	ret = wl1271_acx_keep_alive_mode(wl, true);
	if (ret < 0)
		goto out;

	ret = wl1271_acx_aid(wl, wl->aid);
	if (ret < 0)
		goto out;

	ret = wl1271_cmd_build_klv_null_data(wl);
	if (ret < 0)
		goto out;

	ret = wl1271_acx_keep_alive_config(wl, CMD_TEMPL_KLV_IDX_NULL_DATA,
					   ACX_KEEP_ALIVE_TPL_VALID);
	if (ret < 0)
		goto out;

out:
	return ret;
}
| |
| static int wl1271_unjoin(struct wl1271 *wl) |
| { |
| int ret; |
| |
| if (test_and_clear_bit(WL1271_FLAG_CS_PROGRESS, &wl->flags)) { |
| wl12xx_cmd_stop_channel_switch(wl); |
| ieee80211_chswitch_done(wl->vif, false); |
| } |
| |
| /* to stop listening to a channel, we disconnect */ |
| ret = wl12xx_cmd_role_stop_sta(wl); |
| if (ret < 0) |
| goto out; |
| |
| memset(wl->bssid, 0, ETH_ALEN); |
| |
| /* reset TX security counters on a clean disconnect */ |
| wl->tx_security_last_seq_lsb = 0; |
| wl->tx_security_seq = 0; |
| |
| out: |
| return ret; |
| } |
| |
/* Reset the basic and current rate sets to the defaults for wl->band. */
static void wl1271_set_band_rate(struct wl1271 *wl)
{
	wl->basic_rate_set = wl->bitrate_masks[wl->band];
	wl->rate_set = wl->basic_rate_set;
}
| |
| static bool wl12xx_is_roc(struct wl1271 *wl) |
| { |
| u8 role_id; |
| |
| role_id = find_first_bit(wl->roc_map, WL12XX_MAX_ROLES); |
| if (role_id >= WL12XX_MAX_ROLES) |
| return false; |
| |
| return true; |
| } |
| |
/*
 * Handle mac80211 idle transitions for a STA interface.
 *
 * Entering idle: drop the device-role ROC (if any), fall back to the
 * minimal rate policy and invalidate the keep-alive template. Leaving
 * idle: stop sched scan (the FW only supports it while idle) and start
 * the device role plus ROC so frames can flow before association.
 */
static int wl1271_sta_handle_idle(struct wl1271 *wl, bool idle)
{
	int ret;

	if (idle) {
		/* no need to croc if we weren't busy (e.g. during boot) */
		if (wl12xx_is_roc(wl)) {
			ret = wl12xx_croc(wl, wl->dev_role_id);
			if (ret < 0)
				goto out;

			ret = wl12xx_cmd_role_stop_dev(wl);
			if (ret < 0)
				goto out;
		}
		wl->rate_set = wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
		ret = wl1271_acx_sta_rate_policies(wl);
		if (ret < 0)
			goto out;
		ret = wl1271_acx_keep_alive_config(
			wl, CMD_TEMPL_KLV_IDX_NULL_DATA,
			ACX_KEEP_ALIVE_TPL_INVALID);
		if (ret < 0)
			goto out;
		set_bit(WL1271_FLAG_IDLE, &wl->flags);
	} else {
		/* The current firmware only supports sched_scan in idle */
		if (wl->sched_scanning) {
			wl1271_scan_sched_scan_stop(wl);
			ieee80211_sched_scan_stopped(wl->hw);
		}

		ret = wl12xx_cmd_role_start_dev(wl);
		if (ret < 0)
			goto out;

		ret = wl12xx_roc(wl, wl->dev_role_id);
		if (ret < 0)
			goto out;
		clear_bit(WL1271_FLAG_IDLE, &wl->flags);
	}

out:
	return ret;
}
| |
/*
 * mac80211 op_config callback: apply channel/band, idle, power-save (PSM)
 * and TX power changes to the firmware.
 *
 * While the chip is off, only the channel/band and TX power level are
 * cached in struct wl1271 (to be applied later); everything else requires
 * an ELP wakeup first. Returns 0 or a negative error code.
 */
static int wl1271_op_config(struct ieee80211_hw *hw, u32 changed)
{
	struct wl1271 *wl = hw->priv;
	struct ieee80211_conf *conf = &hw->conf;
	int channel, ret = 0;
	bool is_ap;

	channel = ieee80211_frequency_to_channel(conf->channel->center_freq);

	wl1271_debug(DEBUG_MAC80211, "mac80211 config ch %d psm %s power %d %s"
		     " changed 0x%x",
		     channel,
		     conf->flags & IEEE80211_CONF_PS ? "on" : "off",
		     conf->power_level,
		     conf->flags & IEEE80211_CONF_IDLE ? "idle" : "in use",
		     changed);

	/*
	 * mac80211 will go to idle nearly immediately after transmitting some
	 * frames, such as the deauth. To make sure those frames reach the air,
	 * wait here until the TX queue is fully flushed.
	 * (done before taking wl->mutex)
	 */
	if ((changed & IEEE80211_CONF_CHANGE_IDLE) &&
	    (conf->flags & IEEE80211_CONF_IDLE))
		wl1271_tx_flush(wl);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state == WL1271_STATE_OFF)) {
		/* we support configuring the channel and band while off */
		if ((changed & IEEE80211_CONF_CHANGE_CHANNEL)) {
			wl->band = conf->channel->band;
			wl->channel = channel;
		}

		if ((changed & IEEE80211_CONF_CHANGE_POWER))
			wl->power_level = conf->power_level;

		goto out;
	}

	is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	/* if the channel changes while joined, join again */
	if (changed & IEEE80211_CONF_CHANGE_CHANNEL &&
	    ((wl->band != conf->channel->band) ||
	     (wl->channel != channel))) {
		/* send all pending packets */
		wl1271_tx_work_locked(wl);
		wl->band = conf->channel->band;
		wl->channel = channel;

		if (!is_ap) {
			/*
			 * FIXME: the mac80211 should really provide a fixed
			 * rate to use here. for now, just use the smallest
			 * possible rate for the band as a fixed rate for
			 * association frames and other control messages.
			 */
			if (!test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags))
				wl1271_set_band_rate(wl);

			wl->basic_rate =
				wl1271_tx_min_rate_get(wl, wl->basic_rate_set);
			ret = wl1271_acx_sta_rate_policies(wl);
			if (ret < 0)
				wl1271_warning("rate policy for channel "
					       "failed %d", ret);

			if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) {
				/* associated: cancel any ROC, then re-join on
				 * the new channel */
				if (wl12xx_is_roc(wl)) {
					/* roaming */
					ret = wl12xx_croc(wl, wl->dev_role_id);
					if (ret < 0)
						goto out_sleep;
				}
				ret = wl1271_join(wl, false);
				if (ret < 0)
					wl1271_warning("cmd join on channel "
						       "failed %d", ret);
			} else {
				/*
				 * change the ROC channel. do it only if we are
				 * not idle. otherwise, CROC will be called
				 * anyway.
				 */
				if (wl12xx_is_roc(wl) &&
				    !(conf->flags & IEEE80211_CONF_IDLE)) {
					ret = wl12xx_croc(wl, wl->dev_role_id);
					if (ret < 0)
						goto out_sleep;

					ret = wl12xx_roc(wl, wl->dev_role_id);
					if (ret < 0)
						wl1271_warning("roc failed %d",
							       ret);
				}
			}
		}
	}

	if (changed & IEEE80211_CONF_CHANGE_IDLE && !is_ap) {
		ret = wl1271_sta_handle_idle(wl,
					conf->flags & IEEE80211_CONF_IDLE);
		if (ret < 0)
			wl1271_warning("idle mode change failed %d", ret);
	}

	/*
	 * if mac80211 changes the PSM mode, make sure the mode is not
	 * incorrectly changed after the pspoll failure active window.
	 */
	if (changed & IEEE80211_CONF_CHANGE_PS)
		clear_bit(WL1271_FLAG_PSPOLL_FAILURE, &wl->flags);

	if (conf->flags & IEEE80211_CONF_PS &&
	    !test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags)) {
		set_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags);

		/*
		 * We enter PSM only if we're already associated.
		 * If we're not, we'll enter it when joining an SSID,
		 * through the bss_info_changed() hook.
		 */
		if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) {
			wl1271_debug(DEBUG_PSM, "psm enabled");
			ret = wl1271_ps_set_mode(wl, STATION_POWER_SAVE_MODE,
						 wl->basic_rate, true);
		}
	} else if (!(conf->flags & IEEE80211_CONF_PS) &&
		   test_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags)) {
		wl1271_debug(DEBUG_PSM, "psm disabled");

		clear_bit(WL1271_FLAG_PSM_REQUESTED, &wl->flags);

		/* only leave PSM in fw if we actually entered it */
		if (test_bit(WL1271_FLAG_PSM, &wl->flags))
			ret = wl1271_ps_set_mode(wl, STATION_ACTIVE_MODE,
						 wl->basic_rate, true);
	}

	if (conf->power_level != wl->power_level) {
		ret = wl1271_acx_tx_power(wl, conf->power_level);
		if (ret < 0)
			goto out_sleep;

		wl->power_level = conf->power_level;
	}

out_sleep:
	wl1271_ps_elp_sleep(wl);

out:
	mutex_unlock(&wl->mutex);

	return ret;
}
| |
/*
 * Multicast filter snapshot: built in wl1271_op_prepare_multicast(),
 * passed to wl1271_op_configure_filter() as an opaque u64 cookie and
 * freed there.
 */
struct wl1271_filter_params {
	bool enabled;		/* false when the list exceeded the fw table */
	int mc_list_length;	/* valid entries in mc_list */
	u8 mc_list[ACX_MC_ADDRESS_GROUP_MAX][ETH_ALEN];
};
| |
/*
 * mac80211 prepare_multicast callback: snapshot the interface's multicast
 * address list into a heap-allocated wl1271_filter_params and return it
 * as an opaque u64 cookie for wl1271_op_configure_filter(), which
 * consumes and frees it. Returns 0 (no cookie) when the chip is off or
 * the allocation fails. The #if branches support both the >= 2.6.35
 * netdev_hw_addr_list API and the older dev_addr_list API.
 */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw,
				       struct netdev_hw_addr_list *mc_list)
#else
static u64 wl1271_op_prepare_multicast(struct ieee80211_hw *hw, int mc_count,
				       struct dev_addr_list *mc_list)
#endif
{
	struct wl1271_filter_params *fp;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
	struct netdev_hw_addr *ha;
#else
	int i;
#endif
	struct wl1271 *wl = hw->priv;

	if (unlikely(wl->state == WL1271_STATE_OFF))
		return 0;

	/* GFP_ATOMIC: this callback may run in atomic context */
	fp = kzalloc(sizeof(*fp), GFP_ATOMIC);
	if (!fp) {
		wl1271_error("Out of memory setting filters.");
		return 0;
	}

	/* update multicast filtering parameters */
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
	fp->mc_list_length = 0;
	if (netdev_hw_addr_list_count(mc_list) > ACX_MC_ADDRESS_GROUP_MAX) {
#else
	fp->enabled = true;
	if (mc_count > ACX_MC_ADDRESS_GROUP_MAX) {
		mc_count = 0;
#endif
		/* too many addresses for the firmware table - disable it */
		fp->enabled = false;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
	} else {
		fp->enabled = true;
		netdev_hw_addr_list_for_each(ha, mc_list) {
#else
	}

	fp->mc_list_length = 0;
	for (i = 0; i < mc_count; i++) {
		if (mc_list->da_addrlen == ETH_ALEN) {
#endif
			memcpy(fp->mc_list[fp->mc_list_length],
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
			       ha->addr, ETH_ALEN);
#else
			       mc_list->da_addr, ETH_ALEN);
#endif
			fp->mc_list_length++;
#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35))
		}
#else
		} else
			wl1271_warning("Unknown mc address length.");
		mc_list = mc_list->next;
#endif
	}

	return (u64)(unsigned long)fp;
}
| |
/* RX filter flags honoured by this driver; all others are masked out
 * in wl1271_op_configure_filter(). */
#define WL1271_SUPPORTED_FILTERS (FIF_PROMISC_IN_BSS | \
				  FIF_ALLMULTI | \
				  FIF_FCSFAIL | \
				  FIF_BCN_PRBRESP_PROMISC | \
				  FIF_CONTROL | \
				  FIF_OTHER_BSS)
| |
| static void wl1271_op_configure_filter(struct ieee80211_hw *hw, |
| unsigned int changed, |
| unsigned int *total, u64 multicast) |
| { |
| struct wl1271_filter_params *fp = (void *)(unsigned long)multicast; |
| struct wl1271 *wl = hw->priv; |
| int ret; |
| |
| wl1271_debug(DEBUG_MAC80211, "mac80211 configure filter changed %x" |
| " total %x", changed, *total); |
| |
| mutex_lock(&wl->mutex); |
| |
| *total &= WL1271_SUPPORTED_FILTERS; |
| changed &= WL1271_SUPPORTED_FILTERS; |
| |
| if (unlikely(wl->state == WL1271_STATE_OFF)) |
| goto out; |
| |
| ret = wl1271_ps_elp_wakeup(wl); |
| if (ret < 0) |
| goto out; |
| |
| if (wl->bss_type != BSS_TYPE_AP_BSS) { |
| if (*total & FIF_ALLMULTI) |
| ret = wl1271_acx_group_address_tbl(wl, false, NULL, 0); |
| else if (fp) |
| ret = wl1271_acx_group_address_tbl(wl, fp->enabled, |
| fp->mc_list, |
| fp->mc_list_length); |
| if (ret < 0) |
| goto out_sleep; |
| } |
| |
| /* |
| * the fw doesn't provide an api to configure the filters. instead, |
| * the filters configuration is based on the active roles / ROC |
| * state. |
| */ |
| |
| out_sleep: |
| wl1271_ps_elp_sleep(wl); |
| |
| out: |
| mutex_unlock(&wl->mutex); |
| kfree(fp); |
| } |
| |
| static int wl1271_record_ap_key(struct wl1271 *wl, u8 id, u8 key_type, |
| u8 key_size, const u8 *key, u8 hlid, u32 tx_seq_32, |
| u16 tx_seq_16) |
| { |
| struct wl1271_ap_key *ap_key; |
| int i; |
| |
| wl1271_debug(DEBUG_CRYPT, "record ap key id %d", (int)id); |
| |
| if (key_size > MAX_KEY_SIZE) |
| return -EINVAL; |
| |
| /* |
| * Find next free entry in ap_keys. Also check we are not replacing |
| * an existing key. |
| */ |
| for (i = 0; i < MAX_NUM_KEYS; i++) { |
| if (wl->recorded_ap_keys[i] == NULL) |
| break; |
| |
| if (wl->recorded_ap_keys[i]->id == id) { |
| wl1271_warning("trying to record key replacement"); |
| return -EINVAL; |
| } |
| } |
| |
| if (i == MAX_NUM_KEYS) |
| return -EBUSY; |
| |
| ap_key = kzalloc(sizeof(*ap_key), GFP_KERNEL); |
| if (!ap_key) |
| return -ENOMEM; |
| |
| ap_key->id = id; |
| ap_key->key_type = key_type; |
| ap_key->key_size = key_size; |
| memcpy(ap_key->key, key, key_size); |
| ap_key->hlid = hlid; |
| ap_key->tx_seq_32 = tx_seq_32; |
| ap_key->tx_seq_16 = tx_seq_16; |
| |
| wl->recorded_ap_keys[i] = ap_key; |
| return 0; |
| } |
| |
| static void wl1271_free_ap_keys(struct wl1271 *wl) |
| { |
| int i; |
| |
| for (i = 0; i < MAX_NUM_KEYS; i++) { |
| kfree(wl->recorded_ap_keys[i]); |
| wl->recorded_ap_keys[i] = NULL; |
| } |
| } |
| |
| static int wl1271_ap_init_hwenc(struct wl1271 *wl) |
| { |
| int i, ret = 0; |
| struct wl1271_ap_key *key; |
| bool wep_key_added = false; |
| |
| for (i = 0; i < MAX_NUM_KEYS; i++) { |
| u8 hlid; |
| if (wl->recorded_ap_keys[i] == NULL) |
| break; |
| |
| key = wl->recorded_ap_keys[i]; |
| hlid = key->hlid; |
| if (hlid == WL12XX_INVALID_LINK_ID) |
| hlid = wl->ap_bcast_hlid; |
| |
| ret = wl1271_cmd_set_ap_key(wl, KEY_ADD_OR_REPLACE, |
| key->id, key->key_type, |
| key->key_size, key->key, |
| hlid, key->tx_seq_32, |
| key->tx_seq_16); |
| if (ret < 0) |
| goto out; |
| |
| if (key->key_type == KEY_WEP) |
| wep_key_added = true; |
| } |
| |
| if (wep_key_added) { |
| ret = wl12xx_cmd_set_default_wep_key(wl, wl->default_key, |
| wl->ap_bcast_hlid); |
| if (ret < 0) |
| goto out; |
| } |
| |
| out: |
| wl1271_free_ap_keys(wl); |
| return ret; |
| } |
| |
/*
 * Program one key into the firmware, for either the AP or the STA role.
 *
 * AP role: before the AP is started, KEY_ADD_OR_REPLACE requests are
 * recorded (replayed later by wl1271_ap_init_hwenc()) and other actions
 * are silently accepted; afterwards the key is sent to the fw directly.
 * STA role: a NULL sta means the broadcast address; unicast key removal
 * and removal after the station link is gone are silently ignored, and
 * a WEP key also (re)programs the default key index.
 * Returns 0 on success or a negative error code.
 */
static int wl1271_set_key(struct wl1271 *wl, u16 action, u8 id, u8 key_type,
			  u8 key_size, const u8 *key, u32 tx_seq_32,
			  u16 tx_seq_16, struct ieee80211_sta *sta)
{
	int ret;
	bool is_ap = (wl->bss_type == BSS_TYPE_AP_BSS);

	if (is_ap) {
		struct wl1271_station *wl_sta;
		u8 hlid;

		/* a key with no station is a group key on the bcast link */
		if (sta) {
			wl_sta = (struct wl1271_station *)sta->drv_priv;
			hlid = wl_sta->hlid;
		} else {
			hlid = wl->ap_bcast_hlid;
		}

		if (!test_bit(WL1271_FLAG_AP_STARTED, &wl->flags)) {
			/*
			 * We do not support removing keys after AP shutdown.
			 * Pretend we do to make mac80211 happy.
			 */
			if (action != KEY_ADD_OR_REPLACE)
				return 0;

			ret = wl1271_record_ap_key(wl, id,
					     key_type, key_size,
					     key, hlid, tx_seq_32,
					     tx_seq_16);
		} else {
			ret = wl1271_cmd_set_ap_key(wl, action,
					     id, key_type, key_size,
					     key, hlid, tx_seq_32,
					     tx_seq_16);
		}

		if (ret < 0)
			return ret;
	} else {
		const u8 *addr;
		static const u8 bcast_addr[ETH_ALEN] = {
			0xff, 0xff, 0xff, 0xff, 0xff, 0xff
		};

		/*
		 * A STA set to GEM cipher requires 2 tx spare blocks.
		 * Return to default value when GEM cipher key is removed
		 */
		if (key_type == KEY_GEM) {
			if (action == KEY_ADD_OR_REPLACE)
				wl->tx_spare_blocks = 2;
			else if (action == KEY_REMOVE)
				wl->tx_spare_blocks = TX_HW_BLOCK_SPARE_DEFAULT;
		}

		addr = sta ? sta->addr : bcast_addr;

		if (is_zero_ether_addr(addr)) {
			/* We dont support TX only encryption */
			return -EOPNOTSUPP;
		}

		/* The wl1271 does not allow to remove unicast keys - they
		   will be cleared automatically on next CMD_JOIN. Ignore the
		   request silently, as we dont want the mac80211 to emit
		   an error message. */
		if (action == KEY_REMOVE && !is_broadcast_ether_addr(addr))
			return 0;

		/* don't remove key if hlid was already deleted */
		if (action == KEY_REMOVE &&
		    wl->sta_hlid == WL12XX_INVALID_LINK_ID)
			return 0;

		ret = wl1271_cmd_set_sta_key(wl, action,
					     id, key_type, key_size,
					     key, addr, tx_seq_32,
					     tx_seq_16);
		if (ret < 0)
			return ret;

		/* the default WEP key needs to be configured at least once */
		if (key_type == KEY_WEP) {
			ret = wl12xx_cmd_set_default_wep_key(wl,
							wl->default_key,
							wl->sta_hlid);
			if (ret < 0)
				return ret;
		}
	}

	return 0;
}
| |
/*
 * mac80211 set_key callback: translate the mac80211 cipher suite and
 * command into firmware key types/actions and dispatch via
 * wl1271_set_key(). Requires the chip on (returns -EAGAIN otherwise) and
 * awake. Returns 0 or a negative error code.
 */
static int wl1271_op_set_key(struct ieee80211_hw *hw, enum set_key_cmd cmd,
			     struct ieee80211_vif *vif,
			     struct ieee80211_sta *sta,
			     struct ieee80211_key_conf *key_conf)
{
	struct wl1271 *wl = hw->priv;
	int ret;
	u32 tx_seq_32 = 0;
	u16 tx_seq_16 = 0;
	u8 key_type;

	wl1271_debug(DEBUG_MAC80211, "mac80211 set key");

	wl1271_debug(DEBUG_CRYPT, "CMD: 0x%x sta: %p", cmd, sta);
	wl1271_debug(DEBUG_CRYPT, "Key: algo:0x%x, id:%d, len:%d flags 0x%x",
		     key_conf->cipher, key_conf->keyidx,
		     key_conf->keylen, key_conf->flags);
	wl1271_dump(DEBUG_CRYPT, "KEY: ", key_conf->key, key_conf->keylen);

	mutex_lock(&wl->mutex);

	if (unlikely(wl->state == WL1271_STATE_OFF)) {
		ret = -EAGAIN;
		goto out_unlock;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out_unlock;

	/* map the mac80211 cipher to the fw key type; TKIP/AES/GEM also
	 * carry the current TX security sequence counter */
	switch (key_conf->cipher) {
	case WLAN_CIPHER_SUITE_WEP40:
	case WLAN_CIPHER_SUITE_WEP104:
		key_type = KEY_WEP;

		key_conf->hw_key_idx = key_conf->keyidx;
		break;
	case WLAN_CIPHER_SUITE_TKIP:
		key_type = KEY_TKIP;

		key_conf->hw_key_idx = key_conf->keyidx;
		tx_seq_32 = WL1271_TX_SECURITY_HI32(wl->tx_security_seq);
		tx_seq_16 = WL1271_TX_SECURITY_LO16(wl->tx_security_seq);
		break;
	case WLAN_CIPHER_SUITE_CCMP:
		key_type = KEY_AES;

		/* let mac80211 generate the IV; hw handles encryption */
		key_conf->flags |= IEEE80211_KEY_FLAG_GENERATE_IV;
		tx_seq_32 = WL1271_TX_SECURITY_HI32(wl->tx_security_seq);
		tx_seq_16 = WL1271_TX_SECURITY_LO16(wl->tx_security_seq);
		break;
	case WL1271_CIPHER_SUITE_GEM:
		key_type = KEY_GEM;
		tx_seq_32 = WL1271_TX_SECURITY_HI32(wl->tx_security_seq);
		tx_seq_16 = WL1271_TX_SECURITY_LO16(wl->tx_security_seq);
		break;
	default:
		wl1271_error("Unknown key algo 0x%x", key_conf->cipher);

		ret = -EOPNOTSUPP;
		goto out_sleep;
	}

	switch (cmd) {
	case SET_KEY:
		ret = wl1271_set_key(wl, KEY_ADD_OR_REPLACE,
				     key_conf->keyidx, key_type,
				     key_conf->keylen, key_conf->key,
				     tx_seq_32, tx_seq_16, sta);
		if (ret < 0) {
			wl1271_error("Could not add or replace key");
			goto out_sleep;
		}
		break;

	case DISABLE_KEY:
		ret = wl1271_set_key(wl, KEY_REMOVE,
				     key_conf->keyidx, key_type,
				     key_conf->keylen, key_conf->key,
				     0, 0, sta);
		if (ret < 0) {
			wl1271_error("Could not remove key");
			goto out_sleep;
		}
		break;

	default:
		wl1271_error("Unsupported key cmd 0x%x", cmd);
		ret = -EOPNOTSUPP;
		break;
	}

out_sleep:
	wl1271_ps_elp_sleep(wl);

out_unlock:
	mutex_unlock(&wl->mutex);

	return ret;
}
| |
/*
 * mac80211 hw_scan callback: start a one-shot hardware scan, using the
 * first requested SSID (if any) for directed probe requests. Any active
 * ROC is cancelled first; scanning is refused with -EBUSY while the ROC
 * belongs to an associated station. Returns -EAGAIN when the chip is off
 * (see comment below), 0 or a negative error otherwise.
 */
static int wl1271_op_hw_scan(struct ieee80211_hw *hw,
			     struct ieee80211_vif *vif,
			     struct cfg80211_scan_request *req)
{
	struct wl1271 *wl = hw->priv;
	int ret;
	u8 *ssid = NULL;
	size_t len = 0;

	wl1271_debug(DEBUG_MAC80211, "mac80211 hw scan");

	if (req->n_ssids) {
		ssid = req->ssids[0].ssid;
		len = req->ssids[0].ssid_len;
	}

	mutex_lock(&wl->mutex);

	if (wl->state == WL1271_STATE_OFF) {
		/*
		 * We cannot return -EBUSY here because cfg80211 will expect
		 * a call to ieee80211_scan_completed if we do - in this case
		 * there won't be any call.
		 */
		ret = -EAGAIN;
		goto out;
	}

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	/* cancel ROC before scanning */
	if (wl12xx_is_roc(wl)) {
		if (test_bit(WL1271_FLAG_STA_ASSOCIATED, &wl->flags)) {
			/* don't allow scanning right now */
			ret = -EBUSY;
			goto out_sleep;
		}
		/* NOTE(review): croc/role_stop_dev errors are deliberately
		 * ignored here and the scan attempted anyway - confirm */
		wl12xx_croc(wl, wl->dev_role_id);
		wl12xx_cmd_role_stop_dev(wl);
	}

	ret = wl1271_scan(hw->priv, ssid, len, req);
out_sleep:
	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);

	return ret;
}
| |
/*
 * mac80211 cancel_hw_scan callback: abort an in-progress hardware scan,
 * reset the driver's scan state and notify mac80211 that the scan
 * completed (aborted).
 */
static void wl1271_op_cancel_hw_scan(struct ieee80211_hw *hw,
				     struct ieee80211_vif *vif)
{
	struct wl1271 *wl = hw->priv;
	int ret;

	wl1271_debug(DEBUG_MAC80211, "mac80211 cancel hw scan");

	mutex_lock(&wl->mutex);

	if (wl->state == WL1271_STATE_OFF)
		goto out;

	/* nothing to cancel when no scan is active */
	if (wl->scan.state == WL1271_SCAN_STATE_IDLE)
		goto out;

	ret = wl1271_ps_elp_wakeup(wl);
	if (ret < 0)
		goto out;

	/* only send a stop command if the fw hasn't finished already */
	if (wl->scan.state != WL1271_SCAN_STATE_DONE) {
		ret = wl1271_scan_stop(wl);
		if (ret < 0)
			goto out_sleep;
	}
	wl->scan.state = WL1271_SCAN_STATE_IDLE;
	memset(wl->scan.scanned_ch, 0, sizeof(wl->scan.scanned_ch));
	wl->scan.req = NULL;
	/* true = scan was aborted */
	ieee80211_scan_completed(wl->hw, true);

out_sleep:
	wl1271_ps_elp_sleep(wl);
out:
	mutex_unlock(&wl->mutex);

	/* done outside the mutex - presumably because scan_complete_work
	 * itself takes wl->mutex; TODO confirm */
	cancel_delayed_work_sync(&wl->scan_complete_work);
}
| |
| static int wl1271_op_sched_scan_start(struct ieee80211_hw *hw, |
| struct ieee80211_vif *vif, |
| struct cfg80211_sched_scan_request *req, |
| struct ieee80211_sched_scan_ies *ies) |
| { |
| struct wl1271 *wl = hw->priv; |
| int ret; |
| |
| wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_start"); |
| |
| mutex_lock(&wl->mutex); |
| |
| ret = wl1271_ps_elp_wakeup(wl); |
| if (ret < 0) |
| goto out; |
| |
| ret = wl1271_scan_sched_scan_config(wl, req, ies); |
| if (ret < 0) |
| goto out_sleep; |
| |
| ret = wl1271_scan_sched_scan_start(wl); |
| if (ret < 0) |
| goto out_sleep; |
| |
| wl->sched_scanning = true; |
| |
| out_sleep: |
| wl1271_ps_elp_sleep(wl); |
| out: |
| mutex_unlock(&wl->mutex); |
| return ret; |
| } |
| |
| static void wl1271_op_sched_scan_stop(struct ieee80211_hw *hw, |
| struct ieee80211_vif *vif) |
| { |
| struct wl1271 *wl = hw->priv; |
| int ret; |
| |
| wl1271_debug(DEBUG_MAC80211, "wl1271_op_sched_scan_stop"); |
| |
| mutex_lock(&wl->mutex); |
| |
| ret = wl1271_ps_elp_wakeup(wl); |
| if (ret < 0) |
| goto out; |
| |
| wl1271_scan_sched_scan_stop(wl); |
| |
| wl1271_ps_elp_sleep(wl); |
| out: |
| mutex_unlock(&wl->mutex); |
| } |
| |
| static int wl1271_op_set_frag_threshold(struct ieee80211_hw *hw, u32 value) |
| { |
| struct wl1271 *wl = hw->priv; |
| int ret = 0; |
| |
| mutex_lock(&wl->mutex); |
| |
| if (unlikely(wl->state == WL1271_STATE_OFF)) { |
| ret = -EAGAIN; |
| goto out; |
| } |
| |
| ret = wl1271_ps_elp_wakeup(wl); |
| if (ret < 0) |
| goto out; |
| |
| ret = wl1271_acx_frag_threshold(wl, value); |
| if (ret < 0) |
| wl1271_warning("wl1271_op_set_frag_threshold failed: %d", ret); |
| |
| wl1271_ps_elp_sleep(wl); |
| |
| out: |
| mutex_unlock(&wl->mutex); |
| |
| return ret; |
| } |
| |
| static int wl1271_op_set_rts_threshold(struct ieee80211_hw *hw, u32 value) |
| { |
| struct wl1271 *wl = hw->priv; |
| int ret = 0; |
| |
| mutex_lock(&wl->mutex); |
| |
| if (unlikely(wl->state == WL1271_STATE_OFF)) { |
| ret = -EAGAIN; |
| goto out; |
| } |
| |
| ret = wl1271_ps_elp_wakeup(wl); |
| if (ret < 0) |
| goto out; |
| |
| ret = wl1271_acx_rts_threshold(wl, value); |
| if (ret < 0) |
| wl1271_warning("wl1271_op_set_rts_threshold failed: %d", ret); |
| |
| wl1271_ps_elp_sleep(wl); |
| |
| out: |
| mutex_unlock(&wl->mutex); |
| |
| return ret; |
| } |
| |
| static int wl1271_ssid_set(struct wl1271 *wl, struct sk_buff *skb, |
| int offset) |
| { |
| u8 ssid_len; |
| const u8 *ptr = cfg80211_find_ie(WLAN_EID_SSID, skb->data + offset, |
| skb->len - offset); |
| |
| if (!ptr) { |
| wl1271_error("No SSID in IEs!"); |
| return -ENOENT; |
| } |
| |
| ssid_len = ptr[1]; |
| if (ssid_len > IEEE80211_MAX_SSID_LEN) { |
| wl1271_error("SSID is too long!"); |
| return -EINVAL; |
| } |
| |
| wl->ssid_len = ssid_len; |
| memcpy(wl->ssid, ptr+2, ssid_len); |
| return 0; |
| } |
| |
| static void wl12xx_remove_ie(struct sk_buff *skb, u8 eid, int ieoffset) |
| { |
| int len; |
| const u8 *next, *end = skb->data + skb->len; |
| u8 *ie = (u8 *)cfg80211_find_ie(eid, skb->data + ieoffset, |
| skb->len - ieoffset); |
| if (!ie) |
| return; |
| len = ie[1] + 2; |
| next = ie + len; |
| memmove(ie, next, end - next); |
| skb_trim(skb, skb->len - len); |
| } |
| |
| static void wl12xx_remove_vendor_ie(struct sk_buff *skb, |
| unsigned int oui, u8 oui_type, |
| int ieoffset) |
| { |
| int len; |
| const u8 *next, *end = skb->data + skb->len; |
| u8 *ie = (u8 *)cfg80211_find_vendor_ie(oui, oui_type, |
| skb->data + ieoffset, |
| skb->len - ieoffset); |
| if (!ie) |
| return; |
| len = ie[1] + 2; |
| next = ie + len; |
| memmove(ie, next, end - next); |
| skb_trim(skb, skb->len - len); |
| } |
| |
| static int wl1271_ap_set_probe_resp_tmpl(struct wl1271 *wl, |
|