Diffstat (limited to 'drivers/net/wireless/intel/iwlwifi/mvm/ops.c')
-rw-r--r--  drivers/net/wireless/intel/iwlwifi/mvm/ops.c | 100
1 file changed, 69 insertions(+), 31 deletions(-)
diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
index adbbe19aeae5..a93981cb9714 100644
--- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
+++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c
@@ -1,6 +1,6 @@
// SPDX-License-Identifier: GPL-2.0 OR BSD-3-Clause
/*
- * Copyright (C) 2012-2014, 2018-2023 Intel Corporation
+ * Copyright (C) 2012-2014, 2018-2024 Intel Corporation
* Copyright (C) 2013-2015 Intel Mobile Communications GmbH
* Copyright (C) 2016-2017 Intel Deutschland GmbH
*/
@@ -161,9 +161,9 @@ static void iwl_mvm_rx_monitor_notif(struct iwl_mvm *mvm,
if (!vif || vif->type != NL80211_IFTYPE_STATION)
return;
- if (!vif->bss_conf.chandef.chan ||
- vif->bss_conf.chandef.chan->band != NL80211_BAND_2GHZ ||
- vif->bss_conf.chandef.width < NL80211_CHAN_WIDTH_40)
+ if (!vif->bss_conf.chanreq.oper.chan ||
+ vif->bss_conf.chanreq.oper.chan->band != NL80211_BAND_2GHZ ||
+ vif->bss_conf.chanreq.oper.width < NL80211_CHAN_WIDTH_40)
return;
if (!vif->cfg.assoc)
@@ -219,7 +219,7 @@ void iwl_mvm_update_link_smps(struct ieee80211_vif *vif,
return;
if (mvm->fw_static_smps_request &&
- link_conf->chandef.width == NL80211_CHAN_WIDTH_160 &&
+ link_conf->chanreq.oper.width == NL80211_CHAN_WIDTH_160 &&
link_conf->he_support)
mode = IEEE80211_SMPS_STATIC;
@@ -259,7 +259,7 @@ static void iwl_mvm_rx_thermal_dual_chain_req(struct iwl_mvm *mvm,
}
/**
- * enum iwl_rx_handler_context context for Rx handler
+ * enum iwl_rx_handler_context: context for Rx handler
* @RX_HANDLER_SYNC : this means that it will be called in the Rx path
* which can't acquire mvm->mutex.
* @RX_HANDLER_ASYNC_LOCKED : If the handler needs to hold mvm->mutex
@@ -267,15 +267,19 @@ static void iwl_mvm_rx_thermal_dual_chain_req(struct iwl_mvm *mvm,
* it will be called from a worker with mvm->mutex held.
* @RX_HANDLER_ASYNC_UNLOCKED : in case the handler needs to lock the
* mutex itself, it will be called from a worker without mvm->mutex held.
+ * @RX_HANDLER_ASYNC_LOCKED_WIPHY: If the handler needs to hold the wiphy lock
+ * and mvm->mutex. Will be handled with the wiphy_work queue infra
+ * instead of regular work queue.
*/
enum iwl_rx_handler_context {
RX_HANDLER_SYNC,
RX_HANDLER_ASYNC_LOCKED,
RX_HANDLER_ASYNC_UNLOCKED,
+ RX_HANDLER_ASYNC_LOCKED_WIPHY,
};
/**
- * struct iwl_rx_handlers handler for FW notification
+ * struct iwl_rx_handlers: handler for FW notification
* @cmd_id: command id
* @min_size: minimum size to expect for the notification
* @context: see &iwl_rx_handler_context
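
As background for the new RX_HANDLER_ASYNC_LOCKED_WIPHY value: the context decides which locks are already held when a handler's fn() runs. The standalone C sketch below models that mapping. It is an illustration only; the names and printf() stubs are hypothetical stand-ins for mvm->mutex and the wiphy mutex, which the real driver takes.

/*
 * Minimal standalone model of the per-context locking described in the
 * kernel-doc above. All names here are hypothetical; the stubs stand in
 * for mvm->mutex and the wiphy mutex taken in the real driver.
 */
#include <stdio.h>

enum rx_handler_context {
	RX_SYNC,		/* called in the Rx path, no mutex */
	RX_ASYNC_LOCKED,	/* worker, driver mutex held */
	RX_ASYNC_UNLOCKED,	/* worker, handler locks for itself */
	RX_ASYNC_LOCKED_WIPHY,	/* wiphy worker, wiphy + driver mutex */
};

static void run_handler(enum rx_handler_context ctx, void (*fn)(void))
{
	int driver_lock = (ctx == RX_ASYNC_LOCKED ||
			   ctx == RX_ASYNC_LOCKED_WIPHY);

	if (ctx == RX_ASYNC_LOCKED_WIPHY)
		printf("wiphy mutex taken (by the wiphy_work infra)\n");
	if (driver_lock)
		printf("driver mutex taken\n");

	fn();

	if (driver_lock)
		printf("driver mutex released\n");
	if (ctx == RX_ASYNC_LOCKED_WIPHY)
		printf("wiphy mutex released\n");
}

static void demo(void) { printf("handler body runs\n"); }

int main(void)
{
	run_handler(RX_ASYNC_LOCKED_WIPHY, demo);
	return 0;
}
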
@@ -316,7 +320,8 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
struct iwl_tlc_update_notif),
RX_HANDLER(BT_PROFILE_NOTIFICATION, iwl_mvm_rx_bt_coex_notif,
- RX_HANDLER_ASYNC_LOCKED, struct iwl_bt_coex_profile_notif),
+ RX_HANDLER_ASYNC_LOCKED_WIPHY,
+ struct iwl_bt_coex_profile_notif),
RX_HANDLER_NO_SIZE(BEACON_NOTIFICATION, iwl_mvm_rx_beacon_notif,
RX_HANDLER_ASYNC_LOCKED),
RX_HANDLER_NO_SIZE(STATISTICS_NOTIFICATION, iwl_mvm_rx_statistics,
@@ -324,7 +329,7 @@ static const struct iwl_rx_handlers iwl_mvm_rx_handlers[] = {
RX_HANDLER_GRP(STATISTICS_GROUP, STATISTICS_OPER_NOTIF,
iwl_mvm_handle_rx_system_oper_stats,
- RX_HANDLER_ASYNC_LOCKED,
+ RX_HANDLER_ASYNC_LOCKED_WIPHY,
struct iwl_system_statistics_notif_oper),
RX_HANDLER_GRP(STATISTICS_GROUP, STATISTICS_OPER_PART1_NOTIF,
iwl_mvm_handle_rx_system_oper_part1_stats,
@@ -673,6 +678,8 @@ static const struct iwl_hcmd_arr iwl_mvm_groups[] = {
/* this forward declaration can avoid to export the function */
static void iwl_mvm_async_handlers_wk(struct work_struct *wk);
+static void iwl_mvm_async_handlers_wiphy_wk(struct wiphy *wiphy,
+ struct wiphy_work *work);
static u32 iwl_mvm_min_backoff(struct iwl_mvm *mvm)
{
@@ -682,7 +689,7 @@ static u32 iwl_mvm_min_backoff(struct iwl_mvm *mvm)
if (!backoff)
return 0;
- dflt_pwr_limit = iwl_acpi_get_pwr_limit(mvm->dev);
+ iwl_bios_get_pwr_limit(&mvm->fwrt, &dflt_pwr_limit);
while (backoff->pwr) {
if (dflt_pwr_limit >= backoff->pwr)
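
Only the head of the backoff walk is visible in this hunk; in the surrounding function the loop returns the backoff of the first entry whose pwr threshold the limit meets, stopping at a zero terminator. A standalone model of that table-walk pattern, with hypothetical names and values:

#include <stdio.h>

/* hypothetical stand-in for the driver's power/backoff pair */
struct pwr_tx_backoff {
	unsigned int pwr;	/* threshold; 0 terminates the table */
	unsigned int backoff;	/* minimum backoff to apply */
};

/* table is ordered from highest threshold to lowest */
static unsigned int min_backoff(const struct pwr_tx_backoff *backoff,
				unsigned int dflt_pwr_limit)
{
	while (backoff->pwr) {
		if (dflt_pwr_limit >= backoff->pwr)
			return backoff->backoff;
		backoff++;
	}
	return 0;
}

int main(void)
{
	static const struct pwr_tx_backoff table[] = {
		{ .pwr = 1600, .backoff = 0 },
		{ .pwr = 1300, .backoff = 1 },
		{ .pwr = 900,  .backoff = 2 },
		{ 0 }	/* terminator */
	};

	/* a limit of 1000 clears only the 900 threshold -> backoff 2 */
	printf("backoff = %u\n", min_backoff(table, 1000));
	return 0;
}
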
@@ -1194,7 +1201,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
iwl_fw_runtime_init(&mvm->fwrt, trans, fw, &iwl_mvm_fwrt_ops, mvm,
&iwl_mvm_sanitize_ops, mvm, dbgfs_dir);
- iwl_mvm_get_acpi_tables(mvm);
+ iwl_mvm_get_bios_tables(mvm);
iwl_uefi_get_sgom_table(trans, &mvm->fwrt);
iwl_uefi_get_step_table(trans);
@@ -1265,6 +1272,8 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg,
INIT_LIST_HEAD(&mvm->add_stream_txqs);
spin_lock_init(&mvm->add_stream_lock);
+ wiphy_work_init(&mvm->async_handlers_wiphy_wk,
+ iwl_mvm_async_handlers_wiphy_wk);
init_waitqueue_head(&mvm->rx_sync_waitq);
mvm->queue_sync_state = 0;
@@ -1551,35 +1560,62 @@ void iwl_mvm_async_handlers_purge(struct iwl_mvm *mvm)
spin_unlock_bh(&mvm->async_handlers_lock);
}
-static void iwl_mvm_async_handlers_wk(struct work_struct *wk)
+/*
+ * This function receives a bitmap of rx async handler contexts
+ * (&iwl_rx_handler_context) to handle, and runs only them
+ */
+static void iwl_mvm_async_handlers_by_context(struct iwl_mvm *mvm,
+ u8 contexts)
{
- struct iwl_mvm *mvm =
- container_of(wk, struct iwl_mvm, async_handlers_wk);
struct iwl_async_handler_entry *entry, *tmp;
LIST_HEAD(local_list);
- /* Ensure that we are not in stop flow (check iwl_mvm_mac_stop) */
-
/*
- * Sync with Rx path with a lock. Remove all the entries from this list,
- * add them to a local one (lock free), and then handle them.
+ * Sync with Rx path with a lock. Remove all the entries of the
+ * wanted contexts from this list, add them to a local one (lock free),
+ * and then handle them.
*/
spin_lock_bh(&mvm->async_handlers_lock);
- list_splice_init(&mvm->async_handlers_list, &local_list);
+ list_for_each_entry_safe(entry, tmp, &mvm->async_handlers_list, list) {
+ if (!(BIT(entry->context) & contexts))
+ continue;
+ list_del(&entry->list);
+ list_add_tail(&entry->list, &local_list);
+ }
spin_unlock_bh(&mvm->async_handlers_lock);
list_for_each_entry_safe(entry, tmp, &local_list, list) {
- if (entry->context == RX_HANDLER_ASYNC_LOCKED)
+ if (entry->context != RX_HANDLER_ASYNC_UNLOCKED)
mutex_lock(&mvm->mutex);
entry->fn(mvm, &entry->rxb);
iwl_free_rxb(&entry->rxb);
list_del(&entry->list);
- if (entry->context == RX_HANDLER_ASYNC_LOCKED)
+ if (entry->context != RX_HANDLER_ASYNC_UNLOCKED)
mutex_unlock(&mvm->mutex);
kfree(entry);
}
}
+static void iwl_mvm_async_handlers_wiphy_wk(struct wiphy *wiphy,
+ struct wiphy_work *wk)
+{
+ struct iwl_mvm *mvm =
+ container_of(wk, struct iwl_mvm, async_handlers_wiphy_wk);
+ u8 contexts = BIT(RX_HANDLER_ASYNC_LOCKED_WIPHY);
+
+ iwl_mvm_async_handlers_by_context(mvm, contexts);
+}
+
+static void iwl_mvm_async_handlers_wk(struct work_struct *wk)
+{
+ struct iwl_mvm *mvm =
+ container_of(wk, struct iwl_mvm, async_handlers_wk);
+ u8 contexts = BIT(RX_HANDLER_ASYNC_LOCKED) |
+ BIT(RX_HANDLER_ASYNC_UNLOCKED);
+
+ iwl_mvm_async_handlers_by_context(mvm, contexts);
+}
+
static inline void iwl_mvm_rx_check_trigger(struct iwl_mvm *mvm,
struct iwl_rx_packet *pkt)
{
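
The bitmap filter in iwl_mvm_async_handlers_by_context() above is what lets the two workers share one pending list: each caller passes the set of contexts it owns and splices out only the matching entries under the lock, leaving the rest queued for the other worker. A standalone model of that filter (a simplified singly linked list replaces struct list_head, and claimed entries are pushed rather than added at the tail):

#include <stdio.h>

#define BIT(n) (1U << (n))

enum ctx { CTX_SYNC, CTX_LOCKED, CTX_UNLOCKED, CTX_LOCKED_WIPHY };

struct entry {
	enum ctx context;
	struct entry *next;
};

/* move entries whose context bit is set in @contexts from @src to @dst */
static void splice_by_context(struct entry **src, struct entry **dst,
			      unsigned int contexts)
{
	struct entry **pp = src;

	while (*pp) {
		struct entry *e = *pp;

		if (!(BIT(e->context) & contexts)) {
			pp = &e->next;	/* not ours, leave it queued */
			continue;
		}
		*pp = e->next;		/* unlink from the shared list */
		e->next = *dst;		/* push onto the local list */
		*dst = e;
	}
}

int main(void)
{
	struct entry c = { CTX_LOCKED_WIPHY, NULL };
	struct entry b = { CTX_LOCKED, &c };
	struct entry a = { CTX_UNLOCKED, &b };
	struct entry *pending = &a, *local = NULL;

	/* the contexts iwl_mvm_async_handlers_wk() claims for itself */
	splice_by_context(&pending, &local,
			  BIT(CTX_LOCKED) | BIT(CTX_UNLOCKED));

	for (struct entry *e = local; e; e = e->next)
		printf("claimed entry with context %d\n", e->context);
	/* the CTX_LOCKED_WIPHY entry stays on @pending for the wiphy worker */
	return 0;
}
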
@@ -1659,7 +1695,11 @@ static void iwl_mvm_rx_common(struct iwl_mvm *mvm,
spin_lock(&mvm->async_handlers_lock);
list_add_tail(&entry->list, &mvm->async_handlers_list);
spin_unlock(&mvm->async_handlers_lock);
- schedule_work(&mvm->async_handlers_wk);
+ if (rx_h->context == RX_HANDLER_ASYNC_LOCKED_WIPHY)
+ wiphy_work_queue(mvm->hw->wiphy,
+ &mvm->async_handlers_wiphy_wk);
+ else
+ schedule_work(&mvm->async_handlers_wk);
break;
}
}
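
The split dispatch above relies on the wiphy_work infrastructure: work queued with wiphy_work_queue() later runs with the wiphy mutex already held, which is exactly what RX_HANDLER_ASYNC_LOCKED_WIPHY handlers need. A kernel-style sketch of that pattern (not standalone; struct my_priv and the surrounding driver glue are hypothetical, only the wiphy_work_*() calls mirror the API used in this patch):

struct my_priv {
	struct ieee80211_hw *hw;
	struct wiphy_work wk;
};

/* runs with the wiphy mutex held by the wiphy_work infrastructure */
static void my_wiphy_wk(struct wiphy *wiphy, struct wiphy_work *wk)
{
	struct my_priv *priv = container_of(wk, struct my_priv, wk);

	/* wiphy-locked APIs may be called directly here */
}

static void my_init(struct my_priv *priv)
{
	wiphy_work_init(&priv->wk, my_wiphy_wk);
}

static void my_rx_notif(struct my_priv *priv)
{
	/* safe from the Rx path; defers to a wiphy-locked worker */
	wiphy_work_queue(priv->hw->wiphy, &priv->wk);
}
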
@@ -1788,12 +1828,8 @@ static void iwl_mvm_wake_sw_queue(struct iwl_op_mode *op_mode, int hw_queue)
static void iwl_mvm_set_rfkill_state(struct iwl_mvm *mvm)
{
- bool state = iwl_mvm_is_radio_killed(mvm);
-
- if (state)
- wake_up(&mvm->rx_sync_waitq);
-
- wiphy_rfkill_set_hw_state(mvm->hw->wiphy, state);
+ wiphy_rfkill_set_hw_state(mvm->hw->wiphy,
+ iwl_mvm_is_radio_killed(mvm));
}
void iwl_mvm_set_hw_ctkill_state(struct iwl_mvm *mvm, bool state)
@@ -1818,10 +1854,12 @@ static bool iwl_mvm_set_hw_rfkill_state(struct iwl_op_mode *op_mode, bool state)
bool rfkill_safe_init_done = READ_ONCE(mvm->rfkill_safe_init_done);
bool unified = iwl_mvm_has_unified_ucode(mvm);
- if (state)
+ if (state) {
set_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
- else
+ wake_up(&mvm->rx_sync_waitq);
+ } else {
clear_bit(IWL_MVM_STATUS_HW_RFKILL, &mvm->status);
+ }
iwl_mvm_set_rfkill_state(mvm);
@@ -1955,7 +1993,7 @@ void iwl_mvm_nic_restart(struct iwl_mvm *mvm, bool fw_error)
ieee80211_restart_hw(mvm->hw);
} else if (mvm->fwrt.trans->dbg.restart_required) {
IWL_DEBUG_INFO(mvm, "FW restart requested after debug collection\n");
- mvm->fwrt.trans->dbg.restart_required = FALSE;
+ mvm->fwrt.trans->dbg.restart_required = false;
ieee80211_restart_hw(mvm->hw);
} else if (mvm->trans->trans_cfg->device_family <= IWL_DEVICE_FAMILY_8000) {
ieee80211_restart_hw(mvm->hw);