Diffstat (limited to 'drivers/net/wireless/intel'): 64 files changed, 3130 insertions, 1007 deletions
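
The ipw2100/ipw2200 hunks below convert the suspend and connection timestamps from get_seconds() with unsigned long storage to ktime_get_boottime_seconds() with time64_t storage. A minimal sketch of that pattern follows; it is not part of the patch, and struct demo_priv, demo_suspend() and demo_resume() are hypothetical stand-ins for the driver's own fields and PM hooks.

/*
 * Illustrative sketch only.  get_seconds() returned wall-clock time as an
 * unsigned long (32-bit on many platforms); ktime_get_boottime_seconds()
 * returns a 64-bit CLOCK_BOOTTIME value, matching the conversion below.
 */
#include <linux/time64.h>
#include <linux/timekeeping.h>

struct demo_priv {
	time64_t suspend_at;	/* boottime seconds when we suspended */
	time64_t suspend_time;	/* seconds spent in the last suspend */
};

static void demo_suspend(struct demo_priv *priv)
{
	priv->suspend_at = ktime_get_boottime_seconds();
}

static void demo_resume(struct demo_priv *priv)
{
	/*
	 * CLOCK_BOOTTIME keeps advancing across suspend, so the difference
	 * covers the whole sleep and is immune to settimeofday()/NTP jumps;
	 * time64_t also avoids the 32-bit overflow in 2038.
	 */
	priv->suspend_time = ktime_get_boottime_seconds() - priv->suspend_at;
}

The same substitution is applied to connect_start, last_reset and reset_backoff in ipw2100, which is why the corresponding printk format specifiers change to %lld/%llu in the hunks below.
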
diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.c b/drivers/net/wireless/intel/ipw2x00/ipw2100.c index b8fd3cc90634..910db46db6a1 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2100.c +++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.c @@ -692,7 +692,7 @@ static void printk_buf(int level, const u8 * data, u32 len) static void schedule_reset(struct ipw2100_priv *priv) { - unsigned long now = get_seconds(); + time64_t now = ktime_get_boottime_seconds(); /* If we haven't received a reset request within the backoff period, * then we can reset the backoff interval so this reset occurs @@ -701,10 +701,10 @@ static void schedule_reset(struct ipw2100_priv *priv) (now - priv->last_reset > priv->reset_backoff)) priv->reset_backoff = 0; - priv->last_reset = get_seconds(); + priv->last_reset = now; if (!(priv->status & STATUS_RESET_PENDING)) { - IPW_DEBUG_INFO("%s: Scheduling firmware restart (%ds).\n", + IPW_DEBUG_INFO("%s: Scheduling firmware restart (%llds).\n", priv->net_dev->name, priv->reset_backoff); netif_carrier_off(priv->net_dev); netif_stop_queue(priv->net_dev); @@ -2079,7 +2079,7 @@ static void isr_indicate_associated(struct ipw2100_priv *priv, u32 status) memcpy(priv->bssid, bssid, ETH_ALEN); priv->status |= STATUS_ASSOCIATING; - priv->connect_start = get_seconds(); + priv->connect_start = ktime_get_boottime_seconds(); schedule_delayed_work(&priv->wx_event_work, HZ / 10); } @@ -4070,8 +4070,8 @@ static ssize_t show_internals(struct device *d, struct device_attribute *attr, #define DUMP_VAR(x,y) len += sprintf(buf + len, # x ": %" y "\n", priv-> x) if (priv->status & STATUS_ASSOCIATED) - len += sprintf(buf + len, "connected: %lu\n", - get_seconds() - priv->connect_start); + len += sprintf(buf + len, "connected: %llu\n", + ktime_get_boottime_seconds() - priv->connect_start); else len += sprintf(buf + len, "not connected\n"); @@ -4108,7 +4108,7 @@ static ssize_t show_internals(struct device *d, struct device_attribute *attr, DUMP_VAR(txq_stat.lo, "d"); DUMP_VAR(ieee->scans, "d"); - DUMP_VAR(reset_backoff, "d"); + DUMP_VAR(reset_backoff, "lld"); return len; } @@ -5112,11 +5112,9 @@ static int ipw2100_disassociate_bssid(struct ipw2100_priv *priv) .host_command_length = ETH_ALEN }; int err; - int len; IPW_DEBUG_HC("DISASSOCIATION_BSSID\n"); - len = ETH_ALEN; /* The Firmware currently ignores the BSSID and just disassociates from * the currently associated AP -- but in the off chance that a future * firmware does use the BSSID provided here, we go ahead and try and @@ -6437,7 +6435,7 @@ static int ipw2100_suspend(struct pci_dev *pci_dev, pm_message_t state) pci_disable_device(pci_dev); pci_set_power_state(pci_dev, PCI_D3hot); - priv->suspend_at = get_seconds(); + priv->suspend_at = ktime_get_boottime_seconds(); mutex_unlock(&priv->action_mutex); @@ -6482,7 +6480,7 @@ static int ipw2100_resume(struct pci_dev *pci_dev) * the queue of needed */ netif_device_attach(dev); - priv->suspend_time = get_seconds() - priv->suspend_at; + priv->suspend_time = ktime_get_boottime_seconds() - priv->suspend_at; /* Bring the device back up */ if (!(priv->status & STATUS_RF_KILL_SW)) @@ -7723,7 +7721,6 @@ static int ipw2100_wx_get_auth(struct net_device *dev, struct libipw_device *ieee = priv->ieee; struct lib80211_crypt_data *crypt; struct iw_param *param = &wrqu->param; - int ret = 0; switch (param->flags & IW_AUTH_INDEX) { case IW_AUTH_WPA_VERSION: @@ -7733,7 +7730,6 @@ static int ipw2100_wx_get_auth(struct net_device *dev, /* * wpa_supplicant will control these internally */ - ret = -EOPNOTSUPP; 
break; case IW_AUTH_TKIP_COUNTERMEASURES: @@ -7801,9 +7797,6 @@ static int ipw2100_wx_set_mlme(struct net_device *dev, { struct ipw2100_priv *priv = libipw_priv(dev); struct iw_mlme *mlme = (struct iw_mlme *)extra; - __le16 reason; - - reason = cpu_to_le16(mlme->reason_code); switch (mlme->cmd) { case IW_MLME_DEAUTH: diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2100.h b/drivers/net/wireless/intel/ipw2x00/ipw2100.h index ce3e35f6b60f..8c11c7fa2eef 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2100.h +++ b/drivers/net/wireless/intel/ipw2x00/ipw2100.h @@ -491,7 +491,7 @@ struct ipw2100_priv { /* Statistics */ int resets; - int reset_backoff; + time64_t reset_backoff; /* Context */ u8 essid[IW_ESSID_MAX_SIZE]; @@ -500,8 +500,8 @@ struct ipw2100_priv { u8 channel; int last_mode; - unsigned long connect_start; - unsigned long last_reset; + time64_t connect_start; + time64_t last_reset; u32 channel_mask; u32 fatal_error; @@ -581,9 +581,9 @@ struct ipw2100_priv { int user_requested_scan; - /* Track time in suspend */ - unsigned long suspend_at; - unsigned long suspend_time; + /* Track time in suspend, using CLOCK_BOOTTIME */ + time64_t suspend_at; + time64_t suspend_time; u32 interrupts; int tx_interrupts; diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.c b/drivers/net/wireless/intel/ipw2x00/ipw2200.c index 8a858f7e36f4..9644e7b93645 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2200.c +++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.c @@ -7112,7 +7112,7 @@ static u32 ipw_qos_get_burst_duration(struct ipw_priv *priv) { u32 ret = 0; - if ((priv == NULL)) + if (!priv) return 0; if (!(priv->ieee->modulation & LIBIPW_OFDM_MODULATION)) @@ -11888,7 +11888,7 @@ static int ipw_pci_suspend(struct pci_dev *pdev, pm_message_t state) pci_disable_device(pdev); pci_set_power_state(pdev, pci_choose_state(pdev, state)); - priv->suspend_at = get_seconds(); + priv->suspend_at = ktime_get_boottime_seconds(); return 0; } @@ -11925,7 +11925,7 @@ static int ipw_pci_resume(struct pci_dev *pdev) * the queue of needed */ netif_device_attach(dev); - priv->suspend_time = get_seconds() - priv->suspend_at; + priv->suspend_time = ktime_get_boottime_seconds() - priv->suspend_at; /* Bring the device back up */ schedule_work(&priv->up); diff --git a/drivers/net/wireless/intel/ipw2x00/ipw2200.h b/drivers/net/wireless/intel/ipw2x00/ipw2200.h index aa301d1eee3c..f98ab1f71edd 100644 --- a/drivers/net/wireless/intel/ipw2x00/ipw2200.h +++ b/drivers/net/wireless/intel/ipw2x00/ipw2200.h @@ -1343,9 +1343,9 @@ struct ipw_priv { s8 tx_power; - /* Track time in suspend */ - unsigned long suspend_at; - unsigned long suspend_time; + /* Track time in suspend using CLOCK_BOOTIME */ + time64_t suspend_at; + time64_t suspend_time; #ifdef CONFIG_PM u32 pm_state[16]; diff --git a/drivers/net/wireless/intel/ipw2x00/libipw_wx.c b/drivers/net/wireless/intel/ipw2x00/libipw_wx.c index dd29f46d086b..d32d39fa2686 100644 --- a/drivers/net/wireless/intel/ipw2x00/libipw_wx.c +++ b/drivers/net/wireless/intel/ipw2x00/libipw_wx.c @@ -479,7 +479,6 @@ int libipw_wx_get_encode(struct libipw_device *ieee, { struct iw_point *erq = &(wrqu->encoding); int len, key; - struct lib80211_crypt_data *crypt; struct libipw_security *sec = &ieee->sec; LIBIPW_DEBUG_WX("GET_ENCODE\n"); @@ -492,7 +491,6 @@ int libipw_wx_get_encode(struct libipw_device *ieee, } else key = ieee->crypt_info.tx_keyidx; - crypt = ieee->crypt_info.crypt[key]; erq->flags = key + 1; if (!sec->enabled) { diff --git a/drivers/net/wireless/intel/iwlegacy/3945-debug.c 
b/drivers/net/wireless/intel/iwlegacy/3945-debug.c index c1b4441fb8b2..a2960032be81 100644 --- a/drivers/net/wireless/intel/iwlegacy/3945-debug.c +++ b/drivers/net/wireless/intel/iwlegacy/3945-debug.c @@ -95,7 +95,7 @@ il3945_ucode_rx_stats_read(struct file *file, char __user *user_buf, pos += scnprintf(buf + pos, bufsz - pos, "%-32s current" - "acumulative delta max\n", + "accumulative delta max\n", "Statistics_Rx - OFDM:"); pos += scnprintf(buf + pos, bufsz - pos, diff --git a/drivers/net/wireless/intel/iwlegacy/3945-mac.c b/drivers/net/wireless/intel/iwlegacy/3945-mac.c index 62a9794f952b..57e3b6cca234 100644 --- a/drivers/net/wireless/intel/iwlegacy/3945-mac.c +++ b/drivers/net/wireless/intel/iwlegacy/3945-mac.c @@ -476,8 +476,6 @@ il3945_tx_skb(struct il_priv *il, int txq_id = skb_get_queue_mapping(skb); u16 len, idx, hdr_len; u16 firstlen, secondlen; - u8 id; - u8 unicast; u8 sta_id; u8 tid = 0; __le16 fc; @@ -496,9 +494,6 @@ il3945_tx_skb(struct il_priv *il, goto drop_unlock; } - unicast = !is_multicast_ether_addr(hdr->addr1); - id = 0; - fc = hdr->frame_control; #ifdef CONFIG_IWLEGACY_DEBUG @@ -957,10 +952,8 @@ il3945_rx_queue_restock(struct il_priv *il) struct list_head *element; struct il_rx_buf *rxb; unsigned long flags; - int write; spin_lock_irqsave(&rxq->lock, flags); - write = rxq->write & ~0x7; while (il_rx_queue_space(rxq) > 0 && rxq->free_count) { /* Get next free Rx buffer, remove from free list */ element = rxq->rx_free.next; @@ -2725,7 +2718,6 @@ void il3945_post_associate(struct il_priv *il) { int rc = 0; - struct ieee80211_conf *conf = NULL; if (!il->vif || !il->is_open) return; @@ -2738,8 +2730,6 @@ il3945_post_associate(struct il_priv *il) il_scan_cancel_timeout(il, 200); - conf = &il->hw->conf; - il->staging.filter_flags &= ~RXON_FILTER_ASSOC_MSK; il3945_commit_rxon(il); diff --git a/drivers/net/wireless/intel/iwlegacy/3945.c b/drivers/net/wireless/intel/iwlegacy/3945.c index dbf164d48ed3..3e568ce2fb20 100644 --- a/drivers/net/wireless/intel/iwlegacy/3945.c +++ b/drivers/net/wireless/intel/iwlegacy/3945.c @@ -1634,7 +1634,6 @@ il3945_hw_reg_set_txpower(struct il_priv *il, s8 power) { struct il_channel_info *ch_info; s8 max_power; - u8 a_band; u8 i; if (il->tx_power_user_lmt == power) { @@ -1650,7 +1649,6 @@ il3945_hw_reg_set_txpower(struct il_priv *il, s8 power) for (i = 0; i < il->channel_count; i++) { ch_info = &il->channel_info[i]; - a_band = il_is_channel_a_band(ch_info); /* find minimum power of all user and regulatory constraints * (does not consider h/w clipping limitations) */ diff --git a/drivers/net/wireless/intel/iwlegacy/4965-mac.c b/drivers/net/wireless/intel/iwlegacy/4965-mac.c index 562e94870a9c..280cd8ae1696 100644 --- a/drivers/net/wireless/intel/iwlegacy/4965-mac.c +++ b/drivers/net/wireless/intel/iwlegacy/4965-mac.c @@ -1338,15 +1338,12 @@ il4965_accumulative_stats(struct il_priv *il, __le32 * stats) u32 *accum_stats; u32 *delta, *max_delta; struct stats_general_common *general, *accum_general; - struct stats_tx *tx, *accum_tx; prev_stats = (__le32 *) &il->_4965.stats; accum_stats = (u32 *) &il->_4965.accum_stats; size = sizeof(struct il_notif_stats); general = &il->_4965.stats.general.common; accum_general = &il->_4965.accum_stats.general.common; - tx = &il->_4965.stats.tx; - accum_tx = &il->_4965.accum_stats.tx; delta = (u32 *) &il->_4965.delta_stats; max_delta = (u32 *) &il->_4965.max_delta; @@ -4784,7 +4781,6 @@ static void il4965_ucode_callback(const struct firmware *ucode_raw, void *context) { struct il_priv *il = context; - struct 
il_ucode_header *ucode; int err; struct il4965_firmware_pieces pieces; const unsigned int api_max = il->cfg->ucode_api_max; @@ -4814,8 +4810,6 @@ il4965_ucode_callback(const struct firmware *ucode_raw, void *context) } /* Data from ucode file: header followed by uCode images */ - ucode = (struct il_ucode_header *)ucode_raw->data; - err = il4965_load_firmware(il, ucode_raw, &pieces); if (err) diff --git a/drivers/net/wireless/intel/iwlwifi/Makefile b/drivers/net/wireless/intel/iwlwifi/Makefile index 4d08d78c6b71..04e376cc898c 100644 --- a/drivers/net/wireless/intel/iwlwifi/Makefile +++ b/drivers/net/wireless/intel/iwlwifi/Makefile @@ -7,13 +7,13 @@ iwlwifi-objs += iwl-debug.o iwlwifi-objs += iwl-eeprom-read.o iwl-eeprom-parse.o iwlwifi-objs += iwl-phy-db.o iwl-nvm-parse.o iwlwifi-objs += pcie/drv.o pcie/rx.o pcie/tx.o pcie/trans.o -iwlwifi-objs += pcie/ctxt-info.o pcie/trans-gen2.o pcie/tx-gen2.o +iwlwifi-objs += pcie/ctxt-info.o pcie/ctxt-info-gen3.o +iwlwifi-objs += pcie/trans-gen2.o pcie/tx-gen2.o iwlwifi-$(CONFIG_IWLDVM) += cfg/1000.o cfg/2000.o cfg/5000.o cfg/6000.o iwlwifi-$(CONFIG_IWLMVM) += cfg/7000.o cfg/8000.o cfg/9000.o cfg/22000.o iwlwifi-objs += iwl-trans.o iwlwifi-objs += fw/notif-wait.o iwlwifi-$(CONFIG_IWLMVM) += fw/paging.o fw/smem.o fw/init.o fw/dbg.o -iwlwifi-$(CONFIG_IWLMVM) += fw/common_rx.o iwlwifi-$(CONFIG_ACPI) += fw/acpi.o iwlwifi-$(CONFIG_IWLWIFI_DEBUGFS) += fw/debugfs.o diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/2000.c b/drivers/net/wireless/intel/iwlwifi/cfg/2000.c index a63ca8820568..fedb108db68f 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/2000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/2000.c @@ -63,6 +63,7 @@ static const struct iwl_base_params iwl2000_base_params = { .eeprom_size = OTP_LOW_IMAGE_SIZE, .num_of_queues = IWLAGN_NUM_QUEUES, + .max_tfd_queue_size = 256, .max_ll_items = OTP_MAX_LL_ITEMS_2x00, .shadow_ram_support = true, .led_compensation = 51, @@ -76,6 +77,7 @@ static const struct iwl_base_params iwl2000_base_params = { static const struct iwl_base_params iwl2030_base_params = { .eeprom_size = OTP_LOW_IMAGE_SIZE, .num_of_queues = IWLAGN_NUM_QUEUES, + .max_tfd_queue_size = 256, .max_ll_items = OTP_MAX_LL_ITEMS_2x00, .shadow_ram_support = true, .led_compensation = 57, diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c index d4ba66aecdc9..91ca77c7571c 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/22000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/22000.c @@ -59,7 +59,7 @@ #define IWL_22000_UCODE_API_MAX 38 /* Lowest firmware API version supported */ -#define IWL_22000_UCODE_API_MIN 24 +#define IWL_22000_UCODE_API_MIN 39 /* NVM versions */ #define IWL_22000_NVM_VERSION 0x0a1d @@ -73,29 +73,48 @@ #define IWL_22000_SMEM_OFFSET 0x400000 #define IWL_22000_SMEM_LEN 0xD0000 -#define IWL_22000_JF_FW_PRE "iwlwifi-Qu-a0-jf-b0-" -#define IWL_22000_HR_FW_PRE "iwlwifi-Qu-a0-hr-a0-" -#define IWL_22000_HR_CDB_FW_PRE "iwlwifi-QuIcp-z0-hrcdb-a0-" -#define IWL_22000_HR_F0_FW_PRE "iwlwifi-QuQnj-f0-hr-a0-" -#define IWL_22000_JF_B0_FW_PRE "iwlwifi-QuQnj-a0-jf-b0-" -#define IWL_22000_HR_A0_FW_PRE "iwlwifi-QuQnj-a0-hr-a0-" +#define IWL_22000_JF_FW_PRE "iwlwifi-Qu-a0-jf-b0-" +#define IWL_22000_HR_FW_PRE "iwlwifi-Qu-a0-hr-a0-" +#define IWL_22000_HR_CDB_FW_PRE "iwlwifi-QuIcp-z0-hrcdb-a0-" +#define IWL_22000_HR_A_F0_FW_PRE "iwlwifi-QuQnj-f0-hr-a0-" +#define IWL_22000_HR_B_FW_PRE "iwlwifi-Qu-b0-hr-b0-" +#define IWL_22000_JF_B0_FW_PRE "iwlwifi-QuQnj-a0-jf-b0-" +#define IWL_22000_HR_A0_FW_PRE 
"iwlwifi-QuQnj-a0-hr-a0-" +#define IWL_22000_SU_Z0_FW_PRE "iwlwifi-su-z0-" #define IWL_22000_HR_MODULE_FIRMWARE(api) \ IWL_22000_HR_FW_PRE __stringify(api) ".ucode" #define IWL_22000_JF_MODULE_FIRMWARE(api) \ IWL_22000_JF_FW_PRE __stringify(api) ".ucode" -#define IWL_22000_HR_F0_QNJ_MODULE_FIRMWARE(api) \ - IWL_22000_HR_F0_FW_PRE __stringify(api) ".ucode" +#define IWL_22000_HR_A_F0_QNJ_MODULE_FIRMWARE(api) \ + IWL_22000_HR_A_F0_FW_PRE __stringify(api) ".ucode" +#define IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(api) \ + IWL_22000_HR_B_FW_PRE __stringify(api) ".ucode" #define IWL_22000_JF_B0_QNJ_MODULE_FIRMWARE(api) \ IWL_22000_JF_B0_FW_PRE __stringify(api) ".ucode" #define IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(api) \ IWL_22000_HR_A0_FW_PRE __stringify(api) ".ucode" +#define IWL_22000_SU_Z0_MODULE_FIRMWARE(api) \ + IWL_22000_SU_Z0_FW_PRE __stringify(api) ".ucode" #define NVM_HW_SECTION_NUM_FAMILY_22000 10 static const struct iwl_base_params iwl_22000_base_params = { .eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_22000, .num_of_queues = 512, + .max_tfd_queue_size = 256, + .shadow_ram_support = true, + .led_compensation = 57, + .wd_timeout = IWL_LONG_WD_TIMEOUT, + .max_event_log_size = 512, + .shadow_reg_enable = true, + .pcie_l1_allowed = true, +}; + +static const struct iwl_base_params iwl_22560_base_params = { + .eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_22000, + .num_of_queues = 512, + .max_tfd_queue_size = 65536, .shadow_ram_support = true, .led_compensation = 57, .wd_timeout = IWL_LONG_WD_TIMEOUT, @@ -110,11 +129,9 @@ static const struct iwl_ht_params iwl_22000_ht_params = { .ht40_bands = BIT(NL80211_BAND_2GHZ) | BIT(NL80211_BAND_5GHZ), }; -#define IWL_DEVICE_22000 \ +#define IWL_DEVICE_22000_COMMON \ .ucode_api_max = IWL_22000_UCODE_API_MAX, \ .ucode_api_min = IWL_22000_UCODE_API_MIN, \ - .device_family = IWL_DEVICE_FAMILY_22000, \ - .base_params = &iwl_22000_base_params, \ .led_mode = IWL_LED_RF_STATE, \ .nvm_hw_section_num = NVM_HW_SECTION_NUM_FAMILY_22000, \ .non_shared_ant = ANT_A, \ @@ -129,6 +146,10 @@ static const struct iwl_ht_params iwl_22000_ht_params = { .mq_rx_supported = true, \ .vht_mu_mimo_supported = true, \ .mac_addr_from_csr = true, \ + .ht_params = &iwl_22000_ht_params, \ + .nvm_ver = IWL_22000_NVM_VERSION, \ + .nvm_calib_ver = IWL_22000_TX_POWER_VERSION, \ + .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, \ .use_tfh = true, \ .rf_id = true, \ .gen2 = true, \ @@ -136,86 +157,114 @@ static const struct iwl_ht_params iwl_22000_ht_params = { .dbgc_supported = true, \ .min_umac_error_event_table = 0x400000 +#define IWL_DEVICE_22500 \ + IWL_DEVICE_22000_COMMON, \ + .device_family = IWL_DEVICE_FAMILY_22000, \ + .base_params = &iwl_22000_base_params, \ + .csr = &iwl_csr_v1 + +#define IWL_DEVICE_22560 \ + IWL_DEVICE_22000_COMMON, \ + .device_family = IWL_DEVICE_FAMILY_22560, \ + .base_params = &iwl_22560_base_params, \ + .csr = &iwl_csr_v2 + const struct iwl_cfg iwl22000_2ac_cfg_hr = { .name = "Intel(R) Dual Band Wireless AC 22000", .fw_name_pre = IWL_22000_HR_FW_PRE, - IWL_DEVICE_22000, - .csr = &iwl_csr_v1, - .ht_params = &iwl_22000_ht_params, - .nvm_ver = IWL_22000_NVM_VERSION, - .nvm_calib_ver = IWL_22000_TX_POWER_VERSION, - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, + IWL_DEVICE_22500, }; const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb = { .name = "Intel(R) Dual Band Wireless AC 22000", .fw_name_pre = IWL_22000_HR_CDB_FW_PRE, - IWL_DEVICE_22000, - .csr = &iwl_csr_v1, - .ht_params = &iwl_22000_ht_params, - .nvm_ver = IWL_22000_NVM_VERSION, - .nvm_calib_ver = 
IWL_22000_TX_POWER_VERSION, - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, + IWL_DEVICE_22500, .cdb = true, }; const struct iwl_cfg iwl22000_2ac_cfg_jf = { .name = "Intel(R) Dual Band Wireless AC 22000", .fw_name_pre = IWL_22000_JF_FW_PRE, - IWL_DEVICE_22000, - .csr = &iwl_csr_v1, - .ht_params = &iwl_22000_ht_params, - .nvm_ver = IWL_22000_NVM_VERSION, - .nvm_calib_ver = IWL_22000_TX_POWER_VERSION, - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, + IWL_DEVICE_22500, }; const struct iwl_cfg iwl22000_2ax_cfg_hr = { .name = "Intel(R) Dual Band Wireless AX 22000", .fw_name_pre = IWL_22000_HR_FW_PRE, - IWL_DEVICE_22000, - .csr = &iwl_csr_v1, - .ht_params = &iwl_22000_ht_params, - .nvm_ver = IWL_22000_NVM_VERSION, - .nvm_calib_ver = IWL_22000_TX_POWER_VERSION, - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, + IWL_DEVICE_22500, + /* + * This device doesn't support receiving BlockAck with a large bitmap + * so we need to restrict the size of transmitted aggregation to the + * HT size; mac80211 would otherwise pick the HE max (256) by default. + */ + .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, }; -const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_f0 = { +const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0_f0 = { .name = "Intel(R) Dual Band Wireless AX 22000", - .fw_name_pre = IWL_22000_HR_F0_FW_PRE, - IWL_DEVICE_22000, - .csr = &iwl_csr_v1, - .ht_params = &iwl_22000_ht_params, - .nvm_ver = IWL_22000_NVM_VERSION, - .nvm_calib_ver = IWL_22000_TX_POWER_VERSION, - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, + .fw_name_pre = IWL_22000_HR_A_F0_FW_PRE, + IWL_DEVICE_22500, + /* + * This device doesn't support receiving BlockAck with a large bitmap + * so we need to restrict the size of transmitted aggregation to the + * HT size; mac80211 would otherwise pick the HE max (256) by default. + */ + .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, +}; + +const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0 = { + .name = "Intel(R) Dual Band Wireless AX 22000", + .fw_name_pre = IWL_22000_HR_B_FW_PRE, + IWL_DEVICE_22500, + /* + * This device doesn't support receiving BlockAck with a large bitmap + * so we need to restrict the size of transmitted aggregation to the + * HT size; mac80211 would otherwise pick the HE max (256) by default. + */ + .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, }; const struct iwl_cfg iwl22000_2ax_cfg_qnj_jf_b0 = { .name = "Intel(R) Dual Band Wireless AX 22000", .fw_name_pre = IWL_22000_JF_B0_FW_PRE, - IWL_DEVICE_22000, - .csr = &iwl_csr_v1, - .ht_params = &iwl_22000_ht_params, - .nvm_ver = IWL_22000_NVM_VERSION, - .nvm_calib_ver = IWL_22000_TX_POWER_VERSION, - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, + IWL_DEVICE_22500, + /* + * This device doesn't support receiving BlockAck with a large bitmap + * so we need to restrict the size of transmitted aggregation to the + * HT size; mac80211 would otherwise pick the HE max (256) by default. 
+ */ + .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, }; const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0 = { .name = "Intel(R) Dual Band Wireless AX 22000", .fw_name_pre = IWL_22000_HR_A0_FW_PRE, - IWL_DEVICE_22000, - .csr = &iwl_csr_v1, - .ht_params = &iwl_22000_ht_params, - .nvm_ver = IWL_22000_NVM_VERSION, - .nvm_calib_ver = IWL_22000_TX_POWER_VERSION, - .max_ht_ampdu_exponent = IEEE80211_HT_MAX_AMPDU_64K, + IWL_DEVICE_22500, + /* + * This device doesn't support receiving BlockAck with a large bitmap + * so we need to restrict the size of transmitted aggregation to the + * HT size; mac80211 would otherwise pick the HE max (256) by default. + */ + .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, +}; + +const struct iwl_cfg iwl22560_2ax_cfg_su_cdb = { + .name = "Intel(R) Dual Band Wireless AX 22560", + .fw_name_pre = IWL_22000_SU_Z0_FW_PRE, + IWL_DEVICE_22560, + .cdb = true, + /* + * This device doesn't support receiving BlockAck with a large bitmap + * so we need to restrict the size of transmitted aggregation to the + * HT size; mac80211 would otherwise pick the HE max (256) by default. + */ + .max_tx_agg_size = IEEE80211_MAX_AMPDU_BUF_HT, }; MODULE_FIRMWARE(IWL_22000_HR_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_22000_JF_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); -MODULE_FIRMWARE(IWL_22000_HR_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_22000_HR_A_F0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_22000_HR_B_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_22000_JF_B0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); MODULE_FIRMWARE(IWL_22000_HR_A0_QNJ_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); +MODULE_FIRMWARE(IWL_22000_SU_Z0_MODULE_FIRMWARE(IWL_22000_UCODE_API_MAX)); diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/5000.c b/drivers/net/wireless/intel/iwlwifi/cfg/5000.c index a224f1be1ec2..36151e61a26f 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/5000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/5000.c @@ -53,6 +53,7 @@ static const struct iwl_base_params iwl5000_base_params = { .eeprom_size = IWLAGN_EEPROM_IMG_SIZE, .num_of_queues = IWLAGN_NUM_QUEUES, + .max_tfd_queue_size = 256, .pll_cfg = true, .led_compensation = 51, .wd_timeout = IWL_WATCHDOG_DISABLED, diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/6000.c b/drivers/net/wireless/intel/iwlwifi/cfg/6000.c index dbcec7ce7863..b5d8274761d8 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/6000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/6000.c @@ -72,6 +72,7 @@ static const struct iwl_base_params iwl6000_base_params = { .eeprom_size = OTP_LOW_IMAGE_SIZE, .num_of_queues = IWLAGN_NUM_QUEUES, + .max_tfd_queue_size = 256, .max_ll_items = OTP_MAX_LL_ITEMS_6x00, .shadow_ram_support = true, .led_compensation = 51, @@ -84,6 +85,7 @@ static const struct iwl_base_params iwl6000_base_params = { static const struct iwl_base_params iwl6050_base_params = { .eeprom_size = OTP_LOW_IMAGE_SIZE, .num_of_queues = IWLAGN_NUM_QUEUES, + .max_tfd_queue_size = 256, .max_ll_items = OTP_MAX_LL_ITEMS_6x50, .shadow_ram_support = true, .led_compensation = 51, @@ -96,6 +98,7 @@ static const struct iwl_base_params iwl6050_base_params = { static const struct iwl_base_params iwl6000_g2_base_params = { .eeprom_size = OTP_LOW_IMAGE_SIZE, .num_of_queues = IWLAGN_NUM_QUEUES, + .max_tfd_queue_size = 256, .max_ll_items = OTP_MAX_LL_ITEMS_6x00, .shadow_ram_support = true, .led_compensation = 57, diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/7000.c 
b/drivers/net/wireless/intel/iwlwifi/cfg/7000.c index 69bfa827e82a..a62c8346f13a 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/7000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/7000.c @@ -123,6 +123,7 @@ static const struct iwl_base_params iwl7000_base_params = { .eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_7000, .num_of_queues = 31, + .max_tfd_queue_size = 256, .shadow_ram_support = true, .led_compensation = 57, .wd_timeout = IWL_LONG_WD_TIMEOUT, diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c index 7262e973e0d6..c46fa712985b 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/8000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/8000.c @@ -104,6 +104,7 @@ static const struct iwl_base_params iwl8000_base_params = { .eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_8000, .num_of_queues = 31, + .max_tfd_queue_size = 256, .shadow_ram_support = true, .led_compensation = 57, .wd_timeout = IWL_LONG_WD_TIMEOUT, diff --git a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c index c8ea63d02619..24b2f7cbb308 100644 --- a/drivers/net/wireless/intel/iwlwifi/cfg/9000.c +++ b/drivers/net/wireless/intel/iwlwifi/cfg/9000.c @@ -95,6 +95,7 @@ static const struct iwl_base_params iwl9000_base_params = { .eeprom_size = OTP_LOW_IMAGE_SIZE_FAMILY_9000, .num_of_queues = 31, + .max_tfd_queue_size = 256, .shadow_ram_support = true, .led_compensation = 57, .wd_timeout = IWL_LONG_WD_TIMEOUT, diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h index 007bfe7656a4..08d3d8a190f6 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/alive.h @@ -8,6 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright (C) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,6 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright (C) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -187,20 +189,4 @@ struct iwl_card_state_notif { __le32 flags; } __packed; /* CARD_STATE_NTFY_API_S_VER_1 */ -/** - * struct iwl_fseq_ver_mismatch_nty - Notification about version - * - * This notification does not have a direct impact on the init flow. - * It means that another core (not WiFi) has initiated the FSEQ flow - * and updated the FSEQ version. The driver only prints an error when - * this occurs. 
- * - * @aux_read_fseq_ver: auxiliary read FSEQ version - * @wifi_fseq_ver: FSEQ version (embedded in WiFi) - */ -struct iwl_fseq_ver_mismatch_ntf { - __le32 aux_read_fseq_ver; - __le32 wifi_fseq_ver; -} __packed; /* FSEQ_VER_MISMATCH_NTFY_API_S_VER_1 */ - #endif /* __iwl_fw_api_alive_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h index f285bacc8726..6dad748e5cdc 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/commands.h @@ -193,7 +193,8 @@ enum iwl_legacy_cmds { FW_GET_ITEM_CMD = 0x1a, /** - * @TX_CMD: uses &struct iwl_tx_cmd or &struct iwl_tx_cmd_gen2, + * @TX_CMD: uses &struct iwl_tx_cmd or &struct iwl_tx_cmd_gen2 or + * &struct iwl_tx_cmd_gen3, * response in &struct iwl_mvm_tx_resp or * &struct iwl_mvm_tx_resp_v3 */ @@ -646,13 +647,6 @@ enum iwl_system_subcmd_ids { * @INIT_EXTENDED_CFG_CMD: &struct iwl_init_extended_cfg_cmd */ INIT_EXTENDED_CFG_CMD = 0x03, - - /** - * @FSEQ_VER_MISMATCH_NTF: Notification about fseq version - * mismatch during init. The format is specified in - * &struct iwl_fseq_ver_mismatch_ntf. - */ - FSEQ_VER_MISMATCH_NTF = 0xFF, }; #endif /* __iwl_fw_api_commands_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h index 5f6e855006dd..59b3c6e8f37b 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/datapath.h @@ -8,6 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright (C) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,6 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright (C) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -83,6 +85,16 @@ enum iwl_data_path_subcmd_ids { TRIGGER_RX_QUEUES_NOTIF_CMD = 0x2, /** + * @STA_HE_CTXT_CMD: &struct iwl_he_sta_context_cmd + */ + STA_HE_CTXT_CMD = 0x7, + + /** + * @RFH_QUEUE_CONFIG_CMD: &struct iwl_rfh_queue_config + */ + RFH_QUEUE_CONFIG_CMD = 0xD, + + /** * @TLC_MNG_CONFIG_CMD: &struct iwl_tlc_config_cmd */ TLC_MNG_CONFIG_CMD = 0xF, diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h index f2e31e040a7b..55594c93b014 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/mac.h @@ -7,6 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -28,6 +29,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -279,6 +281,10 @@ enum iwl_mac_filter_flags { MAC_FILTER_OUT_BCAST = BIT(8), MAC_FILTER_IN_CRC32 = BIT(11), MAC_FILTER_IN_PROBE_REQUEST = BIT(12), + /** + * @MAC_FILTER_IN_11AX: mark BSS as supporting 802.11ax + */ + MAC_FILTER_IN_11AX = BIT(14), }; /** @@ -406,4 +412,170 @@ struct iwl_missed_beacons_notif { __le32 num_recvd_beacons; } __packed; /* MISSED_BEACON_NTFY_API_S_VER_3 */ +/** + * struct iwl_he_backoff_conf - used for backoff configuration + * Per each trigger-based AC (set by MU EDCA Parameter set info-element) + * used for backoff configuration of TXF5..TXF8 trigger based. + * The MU-TIMER is reloaded w/ MU_TIME each time a frame from the AC is sent via + * trigger-based TX. + * @cwmin: CW min + * @cwmax: CW max + * @aifsn: AIFSN + * AIFSN=0 means that no backoff from the specified TRIG-BASED AC is + * allowed till the MU-TIMER is 0 + * @mu_time: MU time in 8TU units + */ +struct iwl_he_backoff_conf { + __le16 cwmin; + __le16 cwmax; + __le16 aifsn; + __le16 mu_time; +} __packed; /* AC_QOS_DOT11AX_API_S */ + +#define MAX_HE_SUPP_NSS 2 +#define MAX_HE_CHANNEL_BW_INDX 4 + +/** + * struct iwl_he_pkt_ext - QAM thresholds + * The required PPE is set via HE Capabilities IE, per Nss x BW x MCS + * The IE is organized in the following way: + * Support for Nss x BW (or RU) matrix: + * (0=SISO, 1=MIMO2) x (0-20MHz, 1-40MHz, 2-80MHz, 3-160MHz) + * Each entry contains 2 QAM thresholds for 8us and 16us: + * 0=BPSK, 1=QPSK, 2=16QAM, 3=64QAM, 4=256QAM, 5=1024QAM, 6/7=RES + * i.e. QAM_th1 < QAM_th2 such that if TX uses QAM_tx: + * QAM_tx < QAM_th1 --> PPE=0us + * QAM_th1 <= QAM_tx < QAM_th2 --> PPE=8us + * QAM_th2 <= QAM_tx --> PPE=16us + * @pkt_ext_qam_th: QAM thresholds + * For each Nss/Bw define 2 QAM thresholds (0..5) + * For rates below the low_th, no need for PPE + * For rates between low_th and high_th, need 8us PPE + * For rates equal to or higher than the high_th, need 16us PPE + * Nss (0-siso, 1-mimo2) x BW (0-20MHz, 1-40MHz, 2-80MHz, 3-160MHz) x + * (0-low_th, 1-high_th) + */ +struct iwl_he_pkt_ext { + u8 pkt_ext_qam_th[MAX_HE_SUPP_NSS][MAX_HE_CHANNEL_BW_INDX][2]; +} __packed; /* PKT_EXT_DOT11AX_API_S */ + +/** + * enum iwl_he_sta_ctxt_flags - HE STA context flags + * @STA_CTXT_HE_REF_BSSID_VALID: ref bssid addr valid (for receiving specific + * control frames such as TRIG, NDPA, BACK) + * @STA_CTXT_HE_BSS_COLOR_DIS: BSS color disable, don't use the BSS + * color for RX filter but use MAC header + * @STA_CTXT_HE_PARTIAL_BSS_COLOR: partial BSS color allocation + * @STA_CTXT_HE_32BIT_BA_BITMAP: indicates the receiver supports BA bitmap + * of 32-bits + * @STA_CTXT_HE_PACKET_EXT: indicates that the packet-extension info is valid + * and should be used + * @STA_CTXT_HE_TRIG_RND_ALLOC: indicates that trigger based random allocation + * is enabled according to UORA element existence + * @STA_CTXT_HE_CONST_TRIG_RND_ALLOC: used for AV testing + * @STA_CTXT_HE_ACK_ENABLED: indicates that the AP supports receiving ACK- + * enabled AGG, i.e. both BACK and non-BACK frames in a single AGG + * @STA_CTXT_HE_MU_EDCA_CW: indicates that there is an element of MU EDCA + * parameter set, i.e.
the backoff counters for trig-based ACs + */ +enum iwl_he_sta_ctxt_flags { + STA_CTXT_HE_REF_BSSID_VALID = BIT(4), + STA_CTXT_HE_BSS_COLOR_DIS = BIT(5), + STA_CTXT_HE_PARTIAL_BSS_COLOR = BIT(6), + STA_CTXT_HE_32BIT_BA_BITMAP = BIT(7), + STA_CTXT_HE_PACKET_EXT = BIT(8), + STA_CTXT_HE_TRIG_RND_ALLOC = BIT(9), + STA_CTXT_HE_CONST_TRIG_RND_ALLOC = BIT(10), + STA_CTXT_HE_ACK_ENABLED = BIT(11), + STA_CTXT_HE_MU_EDCA_CW = BIT(12), +}; + +/** + * enum iwl_he_htc_flags - HE HTC support flags + * @IWL_HE_HTC_SUPPORT: HE-HTC support + * @IWL_HE_HTC_UL_MU_RESP_SCHED: HE UL MU response schedule + * support via A-control field + * @IWL_HE_HTC_BSR_SUPP: BSR support in A-control field + * @IWL_HE_HTC_OMI_SUPP: A-OMI support in A-control field + * @IWL_HE_HTC_BQR_SUPP: A-BQR support in A-control field + */ +enum iwl_he_htc_flags { + IWL_HE_HTC_SUPPORT = BIT(0), + IWL_HE_HTC_UL_MU_RESP_SCHED = BIT(3), + IWL_HE_HTC_BSR_SUPP = BIT(4), + IWL_HE_HTC_OMI_SUPP = BIT(5), + IWL_HE_HTC_BQR_SUPP = BIT(6), +}; + +/* + * @IWL_HE_HTC_LINK_ADAP_NO_FEEDBACK: the STA does not provide HE MFB + * @IWL_HE_HTC_LINK_ADAP_UNSOLICITED: the STA provides only unsolicited HE MFB + * @IWL_HE_HTC_LINK_ADAP_BOTH: the STA is capable of providing HE MFB in + * response to HE MRQ and if the STA provides unsolicited HE MFB + */ +#define IWL_HE_HTC_LINK_ADAP_POS (1) +#define IWL_HE_HTC_LINK_ADAP_NO_FEEDBACK (0) +#define IWL_HE_HTC_LINK_ADAP_UNSOLICITED (2 << IWL_HE_HTC_LINK_ADAP_POS) +#define IWL_HE_HTC_LINK_ADAP_BOTH (3 << IWL_HE_HTC_LINK_ADAP_POS) + +/** + * struct iwl_he_sta_context_cmd - configure FW to work with HE AP + * @sta_id: STA id + * @tid_limit: max num of TIDs in TX HE-SU multi-TID agg + * 0 - bad value, 1 - multi-tid not supported, 2..8 - tid limit + * @reserved1: reserved byte for future use + * @reserved2: reserved byte for future use + * @flags: see %iwl_11ax_sta_ctxt_flags + * @ref_bssid_addr: reference BSSID used by the AP + * @reserved0: reserved 2 bytes for aligning the ref_bssid_addr field to 8 bytes + * @htc_flags: which features are supported in HTC + * @frag_flags: frag support in A-MSDU + * @frag_level: frag support level + * @frag_max_num: max num of "open" MSDUs in the receiver (in power of 2) + * @frag_min_size: min frag size (except last frag) + * @pkt_ext: optional, exists according to PPE-present bit in the HE-PHY capa + * @bss_color: 11ax AP ID that is used in the HE SIG-A to mark inter BSS frame + * @htc_trig_based_pkt_ext: default PE in 4us units + * @frame_time_rts_th: HE duration RTS threshold, in units of 32us + * @rand_alloc_ecwmin: random CWmin = 2**ECWmin-1 + * @rand_alloc_ecwmax: random CWmax = 2**ECWmax-1 + * @reserved3: reserved byte for future use + * @trig_based_txf: MU EDCA Parameter set for the trigger based traffic queues + */ +struct iwl_he_sta_context_cmd { + u8 sta_id; + u8 tid_limit; + u8 reserved1; + u8 reserved2; + __le32 flags; + + /* The below fields are set via Multiple BSSID IE */ + u8 ref_bssid_addr[6]; + __le16 reserved0; + + /* The below fields are set via HE-capabilities IE */ + __le32 htc_flags; + + u8 frag_flags; + u8 frag_level; + u8 frag_max_num; + u8 frag_min_size; + + /* The below fields are set via PPE thresholds element */ + struct iwl_he_pkt_ext pkt_ext; + + /* The below fields are set via HE-Operation IE */ + u8 bss_color; + u8 htc_trig_based_pkt_ext; + __le16 frame_time_rts_th; + + /* Random access parameter set (i.e. 
RAPS) */ + u8 rand_alloc_ecwmin; + u8 rand_alloc_ecwmax; + __le16 reserved3; + + /* The below fields are set via MU EDCA parameter set element */ + struct iwl_he_backoff_conf trig_based_txf[AC_NUM]; +} __packed; /* STA_CONTEXT_DOT11AX_API_S */ + #endif /* __iwl_fw_api_mac_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h index 8d6dc9189985..6c5338364794 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/nvm-reg.h @@ -195,7 +195,6 @@ struct iwl_nvm_get_info_general { * @NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED: true if 5.2 band enabled * @NVM_MAC_SKU_FLAGS_802_11N_ENABLED: true if 11n enabled * @NVM_MAC_SKU_FLAGS_802_11AC_ENABLED: true if 11ac enabled - * @NVM_MAC_SKU_FLAGS_802_11AX_ENABLED: true if 11ax enabled * @NVM_MAC_SKU_FLAGS_MIMO_DISABLED: true if MIMO disabled * @NVM_MAC_SKU_FLAGS_WAPI_ENABLED: true if WAPI enabled * @NVM_MAC_SKU_FLAGS_REG_CHECK_ENABLED: true if regulatory checker enabled @@ -206,6 +205,9 @@ enum iwl_nvm_mac_sku_flags { NVM_MAC_SKU_FLAGS_BAND_5_2_ENABLED = BIT(1), NVM_MAC_SKU_FLAGS_802_11N_ENABLED = BIT(2), NVM_MAC_SKU_FLAGS_802_11AC_ENABLED = BIT(3), + /** + * @NVM_MAC_SKU_FLAGS_802_11AX_ENABLED: true if 11ax enabled + */ NVM_MAC_SKU_FLAGS_802_11AX_ENABLED = BIT(4), NVM_MAC_SKU_FLAGS_MIMO_DISABLED = BIT(5), NVM_MAC_SKU_FLAGS_WAPI_ENABLED = BIT(8), diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h index 21e13a315421..087fae91baef 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rs.h @@ -314,8 +314,11 @@ enum { IWL_RATE_MCS_8_INDEX, IWL_RATE_MCS_9_INDEX, IWL_LAST_VHT_RATE = IWL_RATE_MCS_9_INDEX, + IWL_RATE_MCS_10_INDEX, + IWL_RATE_MCS_11_INDEX, + IWL_LAST_HE_RATE = IWL_RATE_MCS_11_INDEX, IWL_RATE_COUNT_LEGACY = IWL_LAST_NON_HT_RATE + 1, - IWL_RATE_COUNT = IWL_LAST_VHT_RATE + 1, + IWL_RATE_COUNT = IWL_LAST_HE_RATE + 1, }; #define IWL_RATE_BIT_MSK(r) BIT(IWL_RATE_##r##M_INDEX) @@ -440,8 +443,8 @@ enum { #define RATE_LEGACY_RATE_MSK 0xff /* Bit 10 - OFDM HE */ -#define RATE_MCS_OFDM_HE_POS 10 -#define RATE_MCS_OFDM_HE_MSK BIT(RATE_MCS_OFDM_HE_POS) +#define RATE_MCS_HE_POS 10 +#define RATE_MCS_HE_MSK BIT(RATE_MCS_HE_POS) /* * Bit 11-12: (0) 20MHz, (1) 40MHz, (2) 80MHz, (3) 160MHz @@ -482,15 +485,33 @@ enum { #define RATE_MCS_BF_MSK (1 << RATE_MCS_BF_POS) /* - * Bit 20-21: HE guard interval and LTF type. - * (0) 1xLTF+1.6us, (1) 2xLTF+0.8us, - * (2) 2xLTF+1.6us, (3) 4xLTF+3.2us + * Bit 20-21: HE LTF type and guard interval + * HE (ext) SU: + * 0 1xLTF+0.8us + * 1 2xLTF+0.8us + * 2 2xLTF+1.6us + * 3 & SGI (bit 13) clear 4xLTF+3.2us + * 3 & SGI (bit 13) set 4xLTF+0.8us + * HE MU: + * 0 4xLTF+0.8us + * 1 2xLTF+0.8us + * 2 2xLTF+1.6us + * 3 4xLTF+3.2us + * HE TRIG: + * 0 1xLTF+1.6us + * 1 2xLTF+1.6us + * 2 4xLTF+3.2us + * 3 (does not occur) */ #define RATE_MCS_HE_GI_LTF_POS 20 #define RATE_MCS_HE_GI_LTF_MSK (3 << RATE_MCS_HE_GI_LTF_POS) /* Bit 22-23: HE type. 
(0) SU, (1) SU_EXT, (2) MU, (3) trigger based */ #define RATE_MCS_HE_TYPE_POS 22 +#define RATE_MCS_HE_TYPE_SU (0 << RATE_MCS_HE_TYPE_POS) +#define RATE_MCS_HE_TYPE_EXT_SU (1 << RATE_MCS_HE_TYPE_POS) +#define RATE_MCS_HE_TYPE_MU (2 << RATE_MCS_HE_TYPE_POS) +#define RATE_MCS_HE_TYPE_TRIG (3 << RATE_MCS_HE_TYPE_POS) #define RATE_MCS_HE_TYPE_MSK (3 << RATE_MCS_HE_TYPE_POS) /* Bit 24-25: (0) 20MHz (no dup), (1) 2x20MHz, (2) 4x20MHz, 3 8x20MHz */ @@ -501,6 +522,9 @@ enum { #define RATE_MCS_LDPC_POS 27 #define RATE_MCS_LDPC_MSK (1 << RATE_MCS_LDPC_POS) +/* Bit 28: (1) 106-tone RX (8 MHz RU), (0) normal bandwidth */ +#define RATE_MCS_HE_106T_POS 28 +#define RATE_MCS_HE_106T_MSK (1 << RATE_MCS_HE_106T_POS) /* Link Quality definitions */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h index 7e570c4a9df0..2f599353c885 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/rx.h @@ -8,6 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,6 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -343,6 +345,169 @@ enum iwl_rx_mpdu_mac_info { IWL_RX_MPDU_PHY_PHY_INDEX_MASK = 0xf0, }; +/* + * enum iwl_rx_he_phy - HE PHY data + */ +enum iwl_rx_he_phy { + IWL_RX_HE_PHY_BEAM_CHNG = BIT(0), + IWL_RX_HE_PHY_UPLINK = BIT(1), + IWL_RX_HE_PHY_BSS_COLOR_MASK = 0xfc, + IWL_RX_HE_PHY_SPATIAL_REUSE_MASK = 0xf00, + IWL_RX_HE_PHY_SU_EXT_BW10 = BIT(12), + IWL_RX_HE_PHY_TXOP_DUR_MASK = 0xfe000, + IWL_RX_HE_PHY_LDPC_EXT_SYM = BIT(20), + IWL_RX_HE_PHY_PRE_FEC_PAD_MASK = 0x600000, + IWL_RX_HE_PHY_PE_DISAMBIG = BIT(23), + IWL_RX_HE_PHY_DOPPLER = BIT(24), + /* 6 bits reserved */ + IWL_RX_HE_PHY_DELIM_EOF = BIT(31), + + /* second dword - MU data */ + IWL_RX_HE_PHY_SIGB_COMPRESSION = BIT_ULL(32 + 0), + IWL_RX_HE_PHY_SIBG_SYM_OR_USER_NUM_MASK = 0x1e00000000ULL, + IWL_RX_HE_PHY_HE_LTF_NUM_MASK = 0xe000000000ULL, + IWL_RX_HE_PHY_RU_ALLOC_SEC80 = BIT_ULL(32 + 8), + /* trigger encoded */ + IWL_RX_HE_PHY_RU_ALLOC_MASK = 0xfe0000000000ULL, + IWL_RX_HE_PHY_SIGB_MCS_MASK = 0xf000000000000ULL, + /* 1 bit reserved */ + IWL_RX_HE_PHY_SIGB_DCM = BIT_ULL(32 + 21), + IWL_RX_HE_PHY_PREAMBLE_PUNC_TYPE_MASK = 0xc0000000000000ULL, + /* 8 bits reserved */ +}; + +/** + * struct iwl_rx_mpdu_desc_v1 - RX MPDU descriptor + */ +struct iwl_rx_mpdu_desc_v1 { + /* DW7 - carries rss_hash only when rpa_en == 1 */ + /** + * @rss_hash: RSS hash value + */ + __le32 rss_hash; + /* DW8 - carries filter_match only when rpa_en == 1 */ + /** + * @filter_match: filter match value + */ + __le32 filter_match; + /* DW9 */ + /** + * @rate_n_flags: RX rate/flags encoding + */ + __le32 rate_n_flags; + /* DW10 */ + /** + * @energy_a: energy chain A + */ + u8 energy_a; + /** + * @energy_b: energy chain B + */ + u8 energy_b; + /** + * @channel: channel number + */ + u8 channel; + /** + * @mac_context: MAC context mask + */ + u8 mac_context; + /* DW11 */ + /** + * @gp2_on_air_rise: 
GP2 timer value on air rise (INA) + */ + __le32 gp2_on_air_rise; + /* DW12 & DW13 */ + union { + /** + * @tsf_on_air_rise: + * TSF value on air rise (INA), only valid if + * %IWL_RX_MPDU_PHY_TSF_OVERLOAD isn't set + */ + __le64 tsf_on_air_rise; + /** + * @he_phy_data: + * HE PHY data, see &enum iwl_rx_he_phy, valid + * only if %IWL_RX_MPDU_PHY_TSF_OVERLOAD is set + */ + __le64 he_phy_data; + }; +} __packed; + +/** + * struct iwl_rx_mpdu_desc_v3 - RX MPDU descriptor + */ +struct iwl_rx_mpdu_desc_v3 { + /* DW7 - carries filter_match only when rpa_en == 1 */ + /** + * @filter_match: filter match value + */ + __le32 filter_match; + /* DW8 - carries rss_hash only when rpa_en == 1 */ + /** + * @rss_hash: RSS hash value + */ + __le32 rss_hash; + /* DW9 */ + /** + * @partial_hash: 31:0 ip/tcp header hash + * w/o some fields (such as IP SRC addr) + */ + __le32 partial_hash; + /* DW10 */ + /** + * @raw_xsum: raw xsum value + */ + __le32 raw_xsum; + /* DW11 */ + /** + * @rate_n_flags: RX rate/flags encoding + */ + __le32 rate_n_flags; + /* DW12 */ + /** + * @energy_a: energy chain A + */ + u8 energy_a; + /** + * @energy_b: energy chain B + */ + u8 energy_b; + /** + * @channel: channel number + */ + u8 channel; + /** + * @mac_context: MAC context mask + */ + u8 mac_context; + /* DW13 */ + /** + * @gp2_on_air_rise: GP2 timer value on air rise (INA) + */ + __le32 gp2_on_air_rise; + /* DW14 & DW15 */ + union { + /** + * @tsf_on_air_rise: + * TSF value on air rise (INA), only valid if + * %IWL_RX_MPDU_PHY_TSF_OVERLOAD isn't set + */ + __le64 tsf_on_air_rise; + /** + * @he_phy_data: + * HE PHY data, see &enum iwl_rx_he_phy, valid + * only if %IWL_RX_MPDU_PHY_TSF_OVERLOAD is set + */ + __le64 he_phy_data; + }; + /* DW16 & DW17 */ + /** + * @reserved: reserved + */ + __le32 reserved[2]; +} __packed; /* RX_MPDU_RES_START_API_S_VER_3 */ + /** * struct iwl_rx_mpdu_desc - RX MPDU descriptor */ @@ -400,51 +565,14 @@ struct iwl_rx_mpdu_desc { * @reorder_data: &enum iwl_rx_mpdu_reorder_data */ __le32 reorder_data; - /* DW7 - carries rss_hash only when rpa_en == 1 */ - /** - * @rss_hash: RSS hash value - */ - __le32 rss_hash; - /* DW8 - carries filter_match only when rpa_en == 1 */ - /** - * @filter_match: filter match value - */ - __le32 filter_match; - /* DW9 */ - /** - * @rate_n_flags: RX rate/flags encoding - */ - __le32 rate_n_flags; - /* DW10 */ - /** - * @energy_a: energy chain A - */ - u8 energy_a; - /** - * @energy_b: energy chain B - */ - u8 energy_b; - /** - * @channel: channel number - */ - u8 channel; - /** - * @mac_context: MAC context mask - */ - u8 mac_context; - /* DW11 */ - /** - * @gp2_on_air_rise: GP2 timer value on air rise (INA) - */ - __le32 gp2_on_air_rise; - /* DW12 & DW13 */ - /** - * @tsf_on_air_rise: - * TSF value on air rise (INA), only valid if - * %IWL_RX_MPDU_PHY_TSF_OVERLOAD isn't set - */ - __le64 tsf_on_air_rise; -} __packed; + + union { + struct iwl_rx_mpdu_desc_v1 v1; + struct iwl_rx_mpdu_desc_v3 v3; + }; +} __packed; /* RX_MPDU_RES_START_API_S_VER_3 */ + +#define IWL_RX_DESC_SIZE_V1 offsetofend(struct iwl_rx_mpdu_desc, v1) struct iwl_frame_release { u8 baid; @@ -587,4 +715,36 @@ struct iwl_ba_window_status_notif { __le16 mpdu_rx_count[BA_WINDOW_STREAMS_MAX]; } __packed; /* BA_WINDOW_STATUS_NTFY_API_S_VER_1 */ +/** + * struct iwl_rfh_queue_config - RX queue configuration + * @q_num: Q num + * @enable: enable queue + * @reserved: alignment + * @urbd_stts_wrptr: DMA address of urbd_stts_wrptr + * @fr_bd_cb: DMA address of freeRB table + * @ur_bd_cb: DMA address of used RB table + * 
@fr_bd_wid: Initial index of the free table + */ +struct iwl_rfh_queue_data { + u8 q_num; + u8 enable; + __le16 reserved; + __le64 urbd_stts_wrptr; + __le64 fr_bd_cb; + __le64 ur_bd_cb; + __le32 fr_bd_wid; +} __packed; /* RFH_QUEUE_CONFIG_S_VER_1 */ + +/** + * struct iwl_rfh_queue_config - RX queue configuration + * @num_queues: number of queues configured + * @reserved: alignment + * @data: DMA addresses per-queue + */ +struct iwl_rfh_queue_config { + u8 num_queues; + u8 reserved[3]; + struct iwl_rfh_queue_data data[]; +} __packed; /* RFH_QUEUE_CONFIG_API_S_VER_1 */ + #endif /* __iwl_fw_api_rx_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h index a2a40b515a3c..514b86123d3d 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/api/tx.h @@ -7,6 +7,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -28,6 +29,7 @@ * * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -320,6 +322,29 @@ struct iwl_tx_cmd_gen2 { struct ieee80211_hdr hdr[0]; } __packed; /* TX_CMD_API_S_VER_7 */ +/** + * struct iwl_tx_cmd_gen3 - TX command struct to FW for 22560 devices + * ( TX_CMD = 0x1c ) + * @len: in bytes of the payload, see below for details + * @flags: combination of &enum iwl_tx_cmd_flags + * @offload_assist: TX offload configuration + * @dram_info: FW internal DRAM storage + * @rate_n_flags: rate for *all* Tx attempts, if TX_CMD_FLG_STA_RATE_MSK is + * cleared. Combination of RATE_MCS_* + * @ttl: time to live - packet lifetime limit. The FW should drop if + * passed. + * @hdr: 802.11 header + */ +struct iwl_tx_cmd_gen3 { + __le16 len; + __le16 flags; + __le32 offload_assist; + struct iwl_dram_sec_info dram_info; + __le32 rate_n_flags; + __le64 ttl; + struct ieee80211_hdr hdr[0]; +} __packed; /* TX_CMD_API_S_VER_8 */ + /* * TX response related data */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/common_rx.c b/drivers/net/wireless/intel/iwlwifi/fw/common_rx.c deleted file mode 100644 index 6f75985eea66..000000000000 --- a/drivers/net/wireless/intel/iwlwifi/fw/common_rx.c +++ /dev/null @@ -1,88 +0,0 @@ -/****************************************************************************** - * - * This file is provided under a dual BSD/GPLv2 license. When using or - * redistributing this file, you may do so under either license. - * - * GPL LICENSE SUMMARY - * - * Copyright(c) 2017 Intel Deutschland GmbH - * - * This program is free software; you can redistribute it and/or modify - * it under the terms of version 2 of the GNU General Public License as - * published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, but - * WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * General Public License for more details. - * - * The full GNU General Public License is included in this distribution - * in the file called COPYING. 
- * - * Contact Information: - * Intel Linux Wireless <linuxwifi@intel.com> - * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497 - * - * BSD LICENSE - * - * Copyright(c) 2017 Intel Deutschland GmbH - * All rights reserved. - * - * Redistribution and use in source and binary forms, with or without - * modification, are permitted provided that the following conditions - * are met: - * - * * Redistributions of source code must retain the above copyright - * notice, this list of conditions and the following disclaimer. - * * Redistributions in binary form must reproduce the above copyright - * notice, this list of conditions and the following disclaimer in - * the documentation and/or other materials provided with the - * distribution. - * * Neither the name Intel Corporation nor the names of its - * contributors may be used to endorse or promote products derived - * from this software without specific prior written permission. - * - * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS - * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT - * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR - * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT - * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, - * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT - * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, - * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY - * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT - * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE - * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. - * - *****************************************************************************/ -#include "iwl-drv.h" -#include "runtime.h" -#include "fw/api/commands.h" -#include "fw/api/alive.h" - -static void iwl_fwrt_fseq_ver_mismatch(struct iwl_fw_runtime *fwrt, - struct iwl_rx_cmd_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - struct iwl_fseq_ver_mismatch_ntf *fseq = (void *)pkt->data; - - IWL_ERR(fwrt, "FSEQ version mismatch (aux: %d, wifi: %d)\n", - __le32_to_cpu(fseq->aux_read_fseq_ver), - __le32_to_cpu(fseq->wifi_fseq_ver)); -} - -void iwl_fwrt_handle_notification(struct iwl_fw_runtime *fwrt, - struct iwl_rx_cmd_buffer *rxb) -{ - struct iwl_rx_packet *pkt = rxb_addr(rxb); - u32 cmd = WIDE_ID(pkt->hdr.group_id, pkt->hdr.cmd); - - switch (cmd) { - case WIDE_ID(SYSTEM_GROUP, FSEQ_VER_MISMATCH_NTF): - iwl_fwrt_fseq_ver_mismatch(fwrt, rxb); - break; - default: - break; - } -} -IWL_EXPORT_SYMBOL(iwl_fwrt_handle_notification); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c index fa283285fcbe..a31a42e673c4 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/dbg.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/dbg.c @@ -243,39 +243,47 @@ static void iwl_fw_dump_fifos(struct iwl_fw_runtime *fwrt, if (!iwl_trans_grab_nic_access(fwrt->trans, &flags)) return; - /* Pull RXF1 */ - iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->lmac[0].rxfifo1_size, 0, 0); - /* Pull RXF2 */ - iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->rxfifo2_size, - RXF_DIFF_FROM_PREV, 1); - /* Pull LMAC2 RXF1 */ - if (fwrt->smem_cfg.num_lmacs > 1) - iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->lmac[1].rxfifo1_size, - LMAC2_PRPH_OFFSET, 2); - - /* Pull TXF data from LMAC1 */ - for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries; i++) { - /* Mark the number 
of TXF we're pulling now */ - iwl_trans_write_prph(fwrt->trans, TXF_LARC_NUM, i); - iwl_fwrt_dump_txf(fwrt, dump_data, cfg->lmac[0].txfifo_size[i], - 0, i); + if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RXF)) { + /* Pull RXF1 */ + iwl_fwrt_dump_rxf(fwrt, dump_data, + cfg->lmac[0].rxfifo1_size, 0, 0); + /* Pull RXF2 */ + iwl_fwrt_dump_rxf(fwrt, dump_data, cfg->rxfifo2_size, + RXF_DIFF_FROM_PREV, 1); + /* Pull LMAC2 RXF1 */ + if (fwrt->smem_cfg.num_lmacs > 1) + iwl_fwrt_dump_rxf(fwrt, dump_data, + cfg->lmac[1].rxfifo1_size, + LMAC2_PRPH_OFFSET, 2); } - /* Pull TXF data from LMAC2 */ - if (fwrt->smem_cfg.num_lmacs > 1) { + if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_TXF)) { + /* Pull TXF data from LMAC1 */ for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries; i++) { /* Mark the number of TXF we're pulling now */ - iwl_trans_write_prph(fwrt->trans, - TXF_LARC_NUM + LMAC2_PRPH_OFFSET, - i); + iwl_trans_write_prph(fwrt->trans, TXF_LARC_NUM, i); iwl_fwrt_dump_txf(fwrt, dump_data, - cfg->lmac[1].txfifo_size[i], - LMAC2_PRPH_OFFSET, - i + cfg->num_txfifo_entries); + cfg->lmac[0].txfifo_size[i], 0, i); + } + + /* Pull TXF data from LMAC2 */ + if (fwrt->smem_cfg.num_lmacs > 1) { + for (i = 0; i < fwrt->smem_cfg.num_txfifo_entries; + i++) { + /* Mark the number of TXF we're pulling now */ + iwl_trans_write_prph(fwrt->trans, + TXF_LARC_NUM + + LMAC2_PRPH_OFFSET, i); + iwl_fwrt_dump_txf(fwrt, dump_data, + cfg->lmac[1].txfifo_size[i], + LMAC2_PRPH_OFFSET, + i + cfg->num_txfifo_entries); + } } } - if (fw_has_capa(&fwrt->fw->ucode_capa, + if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_INTERNAL_TXF) && + fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) { /* Pull UMAC internal TXF data from all TXFs */ for (i = 0; @@ -600,42 +608,54 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt) if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) { fifo_data_len = 0; - /* Count RXF2 size */ - if (mem_cfg->rxfifo2_size) { - /* Add header info */ - fifo_data_len += mem_cfg->rxfifo2_size + - sizeof(*dump_data) + - sizeof(struct iwl_fw_error_dump_fifo); - } - - /* Count RXF1 sizes */ - for (i = 0; i < mem_cfg->num_lmacs; i++) { - if (!mem_cfg->lmac[i].rxfifo1_size) - continue; - - /* Add header info */ - fifo_data_len += mem_cfg->lmac[i].rxfifo1_size + - sizeof(*dump_data) + - sizeof(struct iwl_fw_error_dump_fifo); - } + if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RXF)) { - /* Count TXF sizes */ - for (i = 0; i < mem_cfg->num_lmacs; i++) { - int j; + /* Count RXF2 size */ + if (mem_cfg->rxfifo2_size) { + /* Add header info */ + fifo_data_len += + mem_cfg->rxfifo2_size + + sizeof(*dump_data) + + sizeof(struct iwl_fw_error_dump_fifo); + } - for (j = 0; j < mem_cfg->num_txfifo_entries; j++) { - if (!mem_cfg->lmac[i].txfifo_size[j]) + /* Count RXF1 sizes */ + for (i = 0; i < mem_cfg->num_lmacs; i++) { + if (!mem_cfg->lmac[i].rxfifo1_size) continue; /* Add header info */ fifo_data_len += - mem_cfg->lmac[i].txfifo_size[j] + + mem_cfg->lmac[i].rxfifo1_size + sizeof(*dump_data) + sizeof(struct iwl_fw_error_dump_fifo); } } - if (fw_has_capa(&fwrt->fw->ucode_capa, + if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_TXF)) { + size_t fifo_const_len = sizeof(*dump_data) + + sizeof(struct iwl_fw_error_dump_fifo); + + /* Count TXF sizes */ + for (i = 0; i < mem_cfg->num_lmacs; i++) { + int j; + + for (j = 0; j < mem_cfg->num_txfifo_entries; + j++) { + if (!mem_cfg->lmac[i].txfifo_size[j]) + continue; + + /* Add header info */ + fifo_data_len += + fifo_const_len + + 
mem_cfg->lmac[i].txfifo_size[j]; + } + } + } + + if ((fwrt->fw->dbg_dump_mask & + BIT(IWL_FW_ERROR_DUMP_INTERNAL_TXF)) && + fw_has_capa(&fwrt->fw->ucode_capa, IWL_UCODE_TLV_CAPA_EXTEND_SHARED_MEM_CFG)) { for (i = 0; i < ARRAY_SIZE(mem_cfg->internal_txfifo_size); @@ -652,7 +672,8 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt) } /* Make room for PRPH registers */ - if (!fwrt->trans->cfg->gen2) { + if (!fwrt->trans->cfg->gen2 && + fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PRPH)) { for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr_comm); i++) { /* The range includes both boundaries */ @@ -667,7 +688,8 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt) } if (!fwrt->trans->cfg->gen2 && - fwrt->trans->cfg->mq_rx_supported) { + fwrt->trans->cfg->mq_rx_supported && + fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PRPH)) { for (i = 0; i < ARRAY_SIZE(iwl_prph_dump_addr_9000); i++) { /* The range includes both boundaries */ @@ -681,34 +703,42 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt) } } - if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000) + if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000 && + fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RADIO_REG)) radio_len = sizeof(*dump_data) + RADIO_REG_MAX_READ; } file_len = sizeof(*dump_file) + - sizeof(*dump_data) * 3 + - sizeof(*dump_smem_cfg) + fifo_data_len + prph_len + - radio_len + - sizeof(*dump_info); - - /* Make room for the SMEM, if it exists */ - if (smem_len) - file_len += sizeof(*dump_data) + sizeof(*dump_mem) + smem_len; - - /* Make room for the secondary SRAM, if it exists */ - if (sram2_len) - file_len += sizeof(*dump_data) + sizeof(*dump_mem) + sram2_len; - - /* Make room for MEM segments */ - for (i = 0; i < fwrt->fw->n_dbg_mem_tlv; i++) { - file_len += sizeof(*dump_data) + sizeof(*dump_mem) + - le32_to_cpu(fw_dbg_mem[i].len); + radio_len; + + if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_DEV_FW_INFO)) + file_len += sizeof(*dump_data) + sizeof(*dump_info); + if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM_CFG)) + file_len += sizeof(*dump_data) + sizeof(*dump_smem_cfg); + + if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) { + /* Make room for the SMEM, if it exists */ + if (smem_len) + file_len += sizeof(*dump_data) + sizeof(*dump_mem) + + smem_len; + + /* Make room for the secondary SRAM, if it exists */ + if (sram2_len) + file_len += sizeof(*dump_data) + sizeof(*dump_mem) + + sram2_len; + + /* Make room for MEM segments */ + for (i = 0; i < fwrt->fw->n_dbg_mem_tlv; i++) { + file_len += sizeof(*dump_data) + sizeof(*dump_mem) + + le32_to_cpu(fw_dbg_mem[i].len); + } } /* Make room for fw's virtual image pages, if it exists */ - if (!fwrt->trans->cfg->gen2 && + if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING) && + !fwrt->trans->cfg->gen2 && fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size && fwrt->fw_paging_db[0].fw_paging_block) file_len += fwrt->num_of_paging_blk * @@ -722,12 +752,14 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt) sizeof(*dump_info) + sizeof(*dump_smem_cfg); } - if (fwrt->dump.desc) + if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_ERROR_INFO) && + fwrt->dump.desc) file_len += sizeof(*dump_data) + sizeof(*dump_trig) + fwrt->dump.desc->len; - if (!fwrt->fw->n_dbg_mem_tlv) - file_len += sram_len + sizeof(*dump_mem); + if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM) && + !fwrt->fw->n_dbg_mem_tlv) + file_len += sizeof(*dump_data) + sram_len + sizeof(*dump_mem); dump_file = vzalloc(file_len); if (!dump_file) { @@ -740,48 
+772,56 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt) dump_file->barker = cpu_to_le32(IWL_FW_ERROR_DUMP_BARKER); dump_data = (void *)dump_file->data; - dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_DEV_FW_INFO); - dump_data->len = cpu_to_le32(sizeof(*dump_info)); - dump_info = (void *)dump_data->data; - dump_info->device_family = - fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_7000 ? - cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_7) : - cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_8); - dump_info->hw_step = cpu_to_le32(CSR_HW_REV_STEP(fwrt->trans->hw_rev)); - memcpy(dump_info->fw_human_readable, fwrt->fw->human_readable, - sizeof(dump_info->fw_human_readable)); - strncpy(dump_info->dev_human_readable, fwrt->trans->cfg->name, - sizeof(dump_info->dev_human_readable)); - strncpy(dump_info->bus_human_readable, fwrt->dev->bus->name, - sizeof(dump_info->bus_human_readable)); - - dump_data = iwl_fw_error_next_data(dump_data); - - /* Dump shared memory configuration */ - dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_CFG); - dump_data->len = cpu_to_le32(sizeof(*dump_smem_cfg)); - dump_smem_cfg = (void *)dump_data->data; - dump_smem_cfg->num_lmacs = cpu_to_le32(mem_cfg->num_lmacs); - dump_smem_cfg->num_txfifo_entries = - cpu_to_le32(mem_cfg->num_txfifo_entries); - for (i = 0; i < MAX_NUM_LMAC; i++) { - int j; - - for (j = 0; j < TX_FIFO_MAX_NUM; j++) - dump_smem_cfg->lmac[i].txfifo_size[j] = - cpu_to_le32(mem_cfg->lmac[i].txfifo_size[j]); - dump_smem_cfg->lmac[i].rxfifo1_size = - cpu_to_le32(mem_cfg->lmac[i].rxfifo1_size); - } - dump_smem_cfg->rxfifo2_size = cpu_to_le32(mem_cfg->rxfifo2_size); - dump_smem_cfg->internal_txfifo_addr = - cpu_to_le32(mem_cfg->internal_txfifo_addr); - for (i = 0; i < TX_FIFO_INTERNAL_MAX_NUM; i++) { - dump_smem_cfg->internal_txfifo_size[i] = - cpu_to_le32(mem_cfg->internal_txfifo_size[i]); + if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_DEV_FW_INFO)) { + dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_DEV_FW_INFO); + dump_data->len = cpu_to_le32(sizeof(*dump_info)); + dump_info = (void *)dump_data->data; + dump_info->device_family = + fwrt->trans->cfg->device_family == + IWL_DEVICE_FAMILY_7000 ? 
+ cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_7) : + cpu_to_le32(IWL_FW_ERROR_DUMP_FAMILY_8); + dump_info->hw_step = + cpu_to_le32(CSR_HW_REV_STEP(fwrt->trans->hw_rev)); + memcpy(dump_info->fw_human_readable, fwrt->fw->human_readable, + sizeof(dump_info->fw_human_readable)); + strncpy(dump_info->dev_human_readable, fwrt->trans->cfg->name, + sizeof(dump_info->dev_human_readable) - 1); + strncpy(dump_info->bus_human_readable, fwrt->dev->bus->name, + sizeof(dump_info->bus_human_readable) - 1); + + dump_data = iwl_fw_error_next_data(dump_data); } - dump_data = iwl_fw_error_next_data(dump_data); + if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM_CFG)) { + /* Dump shared memory configuration */ + dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM_CFG); + dump_data->len = cpu_to_le32(sizeof(*dump_smem_cfg)); + dump_smem_cfg = (void *)dump_data->data; + dump_smem_cfg->num_lmacs = cpu_to_le32(mem_cfg->num_lmacs); + dump_smem_cfg->num_txfifo_entries = + cpu_to_le32(mem_cfg->num_txfifo_entries); + for (i = 0; i < MAX_NUM_LMAC; i++) { + int j; + u32 *txf_size = mem_cfg->lmac[i].txfifo_size; + + for (j = 0; j < TX_FIFO_MAX_NUM; j++) + dump_smem_cfg->lmac[i].txfifo_size[j] = + cpu_to_le32(txf_size[j]); + dump_smem_cfg->lmac[i].rxfifo1_size = + cpu_to_le32(mem_cfg->lmac[i].rxfifo1_size); + } + dump_smem_cfg->rxfifo2_size = + cpu_to_le32(mem_cfg->rxfifo2_size); + dump_smem_cfg->internal_txfifo_addr = + cpu_to_le32(mem_cfg->internal_txfifo_addr); + for (i = 0; i < TX_FIFO_INTERNAL_MAX_NUM; i++) { + dump_smem_cfg->internal_txfifo_size[i] = + cpu_to_le32(mem_cfg->internal_txfifo_size[i]); + } + + dump_data = iwl_fw_error_next_data(dump_data); + } /* We only dump the FIFOs if the FW is in error state */ if (test_bit(STATUS_FW_ERROR, &fwrt->trans->status)) { @@ -790,7 +830,8 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt) iwl_read_radio_regs(fwrt, &dump_data); } - if (fwrt->dump.desc) { + if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_ERROR_INFO) && + fwrt->dump.desc) { dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_ERROR_INFO); dump_data->len = cpu_to_le32(sizeof(*dump_trig) + fwrt->dump.desc->len); @@ -805,7 +846,8 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt) if (monitor_dump_only) goto dump_trans_data; - if (!fwrt->fw->n_dbg_mem_tlv) { + if (!fwrt->fw->n_dbg_mem_tlv && + fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) { dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); dump_data->len = cpu_to_le32(sram_len + sizeof(*dump_mem)); dump_mem = (void *)dump_data->data; @@ -821,6 +863,9 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt) u32 ofs = le32_to_cpu(fw_dbg_mem[i].ofs); bool success; + if (!(fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM))) + break; + dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); dump_data->len = cpu_to_le32(len + sizeof(*dump_mem)); dump_mem = (void *)dump_data->data; @@ -854,7 +899,7 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt) dump_data = iwl_fw_error_next_data(dump_data); } - if (smem_len) { + if (smem_len && fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) { IWL_DEBUG_INFO(fwrt, "WRT SMEM dump\n"); dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); dump_data->len = cpu_to_le32(smem_len + sizeof(*dump_mem)); @@ -867,7 +912,7 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt) dump_data = iwl_fw_error_next_data(dump_data); } - if (sram2_len) { + if (sram2_len && fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_MEM)) { IWL_DEBUG_INFO(fwrt, "WRT SRAM dump\n"); dump_data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_MEM); 
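/*
 * Illustration only -- not part of this patch. The fw/dbg.c hunks above all
 * repeat one pattern: every section of the error dump is now gated on a bit
 * in fwrt->fw->dbg_dump_mask, indexed by the IWL_FW_ERROR_DUMP_* type, and
 * the same bit guards both the size accounting and the section emission.
 * The driver open-codes the test; a hypothetical helper would look like:
 *
 *	static bool iwl_fw_dump_type_wanted(const struct iwl_fw *fw,
 *					    enum iwl_fw_error_dump_type type)
 *	{
 *		return fw->dbg_dump_mask & BIT(type);
 *	}
 *
 *	if (iwl_fw_dump_type_wanted(fwrt->fw, IWL_FW_ERROR_DUMP_RXF))
 *		... account for and emit the RXF section ...
 *
 * The mask comes from a new firmware TLV (see the iwl-drv.c hunk further
 * down) and defaults to 0xffffffff, i.e. dump everything, when that TLV is
 * absent.
 */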
dump_data->len = cpu_to_le32(sram2_len + sizeof(*dump_mem)); @@ -881,7 +926,8 @@ void iwl_fw_error_dump(struct iwl_fw_runtime *fwrt) } /* Dump fw's virtual image */ - if (!fwrt->trans->cfg->gen2 && + if (fwrt->fw->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING) && + !fwrt->trans->cfg->gen2 && fwrt->fw->img[fwrt->cur_fw_img].paging_mem_size && fwrt->fw_paging_db[0].fw_paging_block) { IWL_DEBUG_INFO(fwrt, "WRT paging dump\n"); diff --git a/drivers/net/wireless/intel/iwlwifi/fw/file.h b/drivers/net/wireless/intel/iwlwifi/fw/file.h index 9d939cbaf6c6..bbf2b265a06a 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/file.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/file.h @@ -146,6 +146,9 @@ enum iwl_ucode_tlv_type { IWL_UCODE_TLV_FW_GSCAN_CAPA = 50, IWL_UCODE_TLV_FW_MEM_SEG = 51, IWL_UCODE_TLV_IML = 52, + + /* TLVs 0x1000-0x2000 are for internal driver usage */ + IWL_UCODE_TLV_FW_DBG_DUMP_LST = 0x1000, }; struct iwl_ucode_tlv { @@ -318,7 +321,7 @@ typedef unsigned int __bitwise iwl_ucode_tlv_capa_t; * IWL_UCODE_TLV_API_WIFI_MCC_UPDATE. When either is set, multi-source LAR * is supported. * @IWL_UCODE_TLV_CAPA_BT_COEX_RRC: supports BT Coex RRC - * @IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT: supports gscan + * @IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT: supports gscan (no longer used) * @IWL_UCODE_TLV_CAPA_STA_PM_NOTIF: firmware will send STA PM notification * @IWL_UCODE_TLV_CAPA_TLC_OFFLOAD: firmware implements rate scaling algorithm * @IWL_UCODE_TLV_CAPA_DYNAMIC_QUOTA: firmware implements quota related @@ -889,39 +892,4 @@ struct iwl_fw_dbg_conf_tlv { struct iwl_fw_dbg_conf_hcmd hcmd; } __packed; -/** - * struct iwl_fw_gscan_capabilities - gscan capabilities supported by FW - * @max_scan_cache_size: total space allocated for scan results (in bytes). - * @max_scan_buckets: maximum number of channel buckets. - * @max_ap_cache_per_scan: maximum number of APs that can be stored per scan. - * @max_rssi_sample_size: number of RSSI samples used for averaging RSSI. - * @max_scan_reporting_threshold: max possible report threshold. in percentage. - * @max_hotlist_aps: maximum number of entries for hotlist APs. - * @max_significant_change_aps: maximum number of entries for significant - * change APs. - * @max_bssid_history_entries: number of BSSID/RSSI entries that the device can - * hold. - * @max_hotlist_ssids: maximum number of entries for hotlist SSIDs. - * @max_number_epno_networks: max number of epno entries. - * @max_number_epno_networks_by_ssid: max number of epno entries if ssid is - * specified. - * @max_number_of_white_listed_ssid: max number of white listed SSIDs. - * @max_number_of_black_listed_ssid: max number of black listed SSIDs. 
- */ -struct iwl_fw_gscan_capabilities { - __le32 max_scan_cache_size; - __le32 max_scan_buckets; - __le32 max_ap_cache_per_scan; - __le32 max_rssi_sample_size; - __le32 max_scan_reporting_threshold; - __le32 max_hotlist_aps; - __le32 max_significant_change_aps; - __le32 max_bssid_history_entries; - __le32 max_hotlist_ssids; - __le32 max_number_epno_networks; - __le32 max_number_epno_networks_by_ssid; - __le32 max_number_of_white_listed_ssid; - __le32 max_number_of_black_listed_ssid; -} __packed; - #endif /* __iwl_fw_file_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/img.h b/drivers/net/wireless/intel/iwlwifi/fw/img.h index f4912382b6af..0861b97c4233 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/img.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/img.h @@ -193,41 +193,6 @@ struct iwl_fw_cscheme_list { } __packed; /** - * struct iwl_gscan_capabilities - gscan capabilities supported by FW - * @max_scan_cache_size: total space allocated for scan results (in bytes). - * @max_scan_buckets: maximum number of channel buckets. - * @max_ap_cache_per_scan: maximum number of APs that can be stored per scan. - * @max_rssi_sample_size: number of RSSI samples used for averaging RSSI. - * @max_scan_reporting_threshold: max possible report threshold. in percentage. - * @max_hotlist_aps: maximum number of entries for hotlist APs. - * @max_significant_change_aps: maximum number of entries for significant - * change APs. - * @max_bssid_history_entries: number of BSSID/RSSI entries that the device can - * hold. - * @max_hotlist_ssids: maximum number of entries for hotlist SSIDs. - * @max_number_epno_networks: max number of epno entries. - * @max_number_epno_networks_by_ssid: max number of epno entries if ssid is - * specified. - * @max_number_of_white_listed_ssid: max number of white listed SSIDs. - * @max_number_of_black_listed_ssid: max number of black listed SSIDs. - */ -struct iwl_gscan_capabilities { - u32 max_scan_cache_size; - u32 max_scan_buckets; - u32 max_ap_cache_per_scan; - u32 max_rssi_sample_size; - u32 max_scan_reporting_threshold; - u32 max_hotlist_aps; - u32 max_significant_change_aps; - u32 max_bssid_history_entries; - u32 max_hotlist_ssids; - u32 max_number_epno_networks; - u32 max_number_epno_networks_by_ssid; - u32 max_number_of_white_listed_ssid; - u32 max_number_of_black_listed_ssid; -}; - -/** * enum iwl_fw_type - iwlwifi firmware type * @IWL_FW_DVM: DVM firmware * @IWL_FW_MVM: MVM firmware @@ -298,7 +263,7 @@ struct iwl_fw { size_t n_dbg_mem_tlv; size_t dbg_trigger_tlv_len[FW_DBG_TRIGGER_MAX]; u8 dbg_dest_reg_num; - struct iwl_gscan_capabilities gscan_capa; + u32 dbg_dump_mask; }; static inline const char *get_fw_dbg_mode_string(int mode) diff --git a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h index d8db1dd100b0..ed23367f7088 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/runtime.h +++ b/drivers/net/wireless/intel/iwlwifi/fw/runtime.h @@ -168,7 +168,4 @@ void iwl_free_fw_paging(struct iwl_fw_runtime *fwrt); void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt); -void iwl_fwrt_handle_notification(struct iwl_fw_runtime *fwrt, - struct iwl_rx_cmd_buffer *rxb); - #endif /* __iwl_fw_runtime_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/fw/smem.c b/drivers/net/wireless/intel/iwlwifi/fw/smem.c index fb4b6442b4d7..ff85d69c2a8c 100644 --- a/drivers/net/wireless/intel/iwlwifi/fw/smem.c +++ b/drivers/net/wireless/intel/iwlwifi/fw/smem.c @@ -8,6 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. 
All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,6 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -143,7 +145,7 @@ void iwl_get_shared_mem_conf(struct iwl_fw_runtime *fwrt) return; pkt = cmd.resp_pkt; - if (fwrt->trans->cfg->device_family == IWL_DEVICE_FAMILY_22000) + if (fwrt->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000) iwl_parse_shared_mem_22000(fwrt, pkt); else iwl_parse_shared_mem(fwrt, pkt); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-config.h b/drivers/net/wireless/intel/iwlwifi/iwl-config.h index 84a816809723..12fddcf15bab 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-config.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-config.h @@ -93,6 +93,7 @@ enum iwl_device_family { IWL_DEVICE_FAMILY_8000, IWL_DEVICE_FAMILY_9000, IWL_DEVICE_FAMILY_22000, + IWL_DEVICE_FAMILY_22560, }; /* @@ -176,6 +177,7 @@ static inline u8 num_of_ant(u8 mask) * @apmg_wake_up_wa: should the MAC access REQ be asserted when a command * is in flight. This is due to a HW bug in 7260, 3160 and 7265. * @scd_chain_ext_wa: should the chain extension feature in SCD be disabled. + * @max_tfd_queue_size: max number of entries in tfd queue. */ struct iwl_base_params { unsigned int wd_timeout; @@ -191,6 +193,7 @@ struct iwl_base_params { scd_chain_ext_wa:1; u16 num_of_queues; /* def: HW dependent */ + u32 max_tfd_queue_size; /* def: HW dependent */ u8 max_ll_items; u8 led_compensation; @@ -571,9 +574,11 @@ extern const struct iwl_cfg iwl22000_2ac_cfg_hr; extern const struct iwl_cfg iwl22000_2ac_cfg_hr_cdb; extern const struct iwl_cfg iwl22000_2ac_cfg_jf; extern const struct iwl_cfg iwl22000_2ax_cfg_hr; -extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_f0; +extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0_f0; +extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_b0; extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_jf_b0; extern const struct iwl_cfg iwl22000_2ax_cfg_qnj_hr_a0; +extern const struct iwl_cfg iwl22560_2ax_cfg_su_cdb; #endif /* CONFIG_IWLMVM */ #endif /* __IWL_CONFIG_H__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h new file mode 100644 index 000000000000..ebea99189ca9 --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/iwl-context-info-gen3.h @@ -0,0 +1,286 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2018 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the GNU + * General Public License for more details. + * + * BSD LICENSE + * + * Copyright(c) 2018 Intel Corporation + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. + * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ +#ifndef __iwl_context_info_file_gen3_h__ +#define __iwl_context_info_file_gen3_h__ + +#include "iwl-context-info.h" + +#define CSR_CTXT_INFO_BOOT_CTRL 0x0 +#define CSR_CTXT_INFO_ADDR 0x118 +#define CSR_IML_DATA_ADDR 0x120 +#define CSR_IML_SIZE_ADDR 0x128 +#define CSR_IML_RESP_ADDR 0x12c + +/* Set bit for enabling automatic function boot */ +#define CSR_AUTO_FUNC_BOOT_ENA BIT(1) +/* Set bit for initiating function boot */ +#define CSR_AUTO_FUNC_INIT BIT(7) + +/** + * enum iwl_prph_scratch_mtr_format - tfd size configuration + * @IWL_PRPH_MTR_FORMAT_16B: 16 bit tfd + * @IWL_PRPH_MTR_FORMAT_32B: 32 bit tfd + * @IWL_PRPH_MTR_FORMAT_64B: 64 bit tfd + * @IWL_PRPH_MTR_FORMAT_256B: 256 bit tfd + */ +enum iwl_prph_scratch_mtr_format { + IWL_PRPH_MTR_FORMAT_16B = 0x0, + IWL_PRPH_MTR_FORMAT_32B = 0x40000, + IWL_PRPH_MTR_FORMAT_64B = 0x80000, + IWL_PRPH_MTR_FORMAT_256B = 0xC0000, +}; + +/** + * enum iwl_prph_scratch_flags - PRPH scratch control flags + * @IWL_PRPH_SCRATCH_EARLY_DEBUG_EN: enable early debug conf + * @IWL_PRPH_SCRATCH_EDBG_DEST_DRAM: use DRAM, with size allocated + * in hwm config. + * @IWL_PRPH_SCRATCH_EDBG_DEST_INTERNAL: use buffer on SRAM + * @IWL_PRPH_SCRATCH_EDBG_DEST_ST_ARBITER: use st arbiter, mainly for + * multicomm. + * @IWL_PRPH_SCRATCH_EDBG_DEST_TB22DTF: route debug data to SoC HW + * @IWL_PRPH_SCTATCH_RB_SIZE_4K: Use 4K RB size (the default is 2K) + * @IWL_PRPH_SCRATCH_MTR_MODE: format used for completion - 0: for + * completion descriptor, 1 for responses (legacy) + * @IWL_PRPH_SCRATCH_MTR_FORMAT: a mask for the size of the tfd. + * There are 4 optional values: 0: 16 bit, 1: 32 bit, 2: 64 bit, + * 3: 256 bit. 
+ */ +enum iwl_prph_scratch_flags { + IWL_PRPH_SCRATCH_EARLY_DEBUG_EN = BIT(4), + IWL_PRPH_SCRATCH_EDBG_DEST_DRAM = BIT(8), + IWL_PRPH_SCRATCH_EDBG_DEST_INTERNAL = BIT(9), + IWL_PRPH_SCRATCH_EDBG_DEST_ST_ARBITER = BIT(10), + IWL_PRPH_SCRATCH_EDBG_DEST_TB22DTF = BIT(11), + IWL_PRPH_SCRATCH_RB_SIZE_4K = BIT(16), + IWL_PRPH_SCRATCH_MTR_MODE = BIT(17), + IWL_PRPH_SCRATCH_MTR_FORMAT = BIT(18) | BIT(19), +}; + +/* + * struct iwl_prph_scratch_version - version structure + * @mac_id: SKU and revision id + * @version: prph scratch information version id + * @size: the size of the context information in DWs + * @reserved: reserved + */ +struct iwl_prph_scratch_version { + __le16 mac_id; + __le16 version; + __le16 size; + __le16 reserved; +} __packed; /* PERIPH_SCRATCH_VERSION_S */ + +/* + * struct iwl_prph_scratch_control - control structure + * @control_flags: context information flags see &enum iwl_prph_scratch_flags + * @reserved: reserved + */ +struct iwl_prph_scratch_control { + __le32 control_flags; + __le32 reserved; +} __packed; /* PERIPH_SCRATCH_CONTROL_S */ + +/* + * struct iwl_prph_scratch_ror_cfg - ror config + * @ror_base_addr: ror start address + * @ror_size: ror size in DWs + * @reserved: reserved + */ +struct iwl_prph_scratch_ror_cfg { + __le64 ror_base_addr; + __le32 ror_size; + __le32 reserved; +} __packed; /* PERIPH_SCRATCH_ROR_CFG_S */ + +/* + * struct iwl_prph_scratch_hwm_cfg - hwm config + * @hwm_base_addr: hwm start address + * @hwm_size: hwm size in DWs + * @reserved: reserved + */ +struct iwl_prph_scratch_hwm_cfg { + __le64 hwm_base_addr; + __le32 hwm_size; + __le32 reserved; +} __packed; /* PERIPH_SCRATCH_HWM_CFG_S */ + +/* + * struct iwl_prph_scratch_rbd_cfg - RBDs configuration + * @free_rbd_addr: default queue free RB CB base address + * @reserved: reserved + */ +struct iwl_prph_scratch_rbd_cfg { + __le64 free_rbd_addr; + __le32 reserved; +} __packed; /* PERIPH_SCRATCH_RBD_CFG_S */ + +/* + * struct iwl_prph_scratch_ctrl_cfg - prph scratch ctrl and config + * @version: version information of context info and HW + * @control: control flags of FH configurations + * @ror_cfg: ror configuration + * @hwm_cfg: hwm configuration + * @rbd_cfg: default RX queue configuration + */ +struct iwl_prph_scratch_ctrl_cfg { + struct iwl_prph_scratch_version version; + struct iwl_prph_scratch_control control; + struct iwl_prph_scratch_ror_cfg ror_cfg; + struct iwl_prph_scratch_hwm_cfg hwm_cfg; + struct iwl_prph_scratch_rbd_cfg rbd_cfg; +} __packed; /* PERIPH_SCRATCH_CTRL_CFG_S */ + +/* + * struct iwl_prph_scratch - peripheral scratch mapping + * @ctrl_cfg: control and configuration of prph scratch + * @dram: firmware images addresses in DRAM + * @reserved: reserved + */ +struct iwl_prph_scratch { + struct iwl_prph_scratch_ctrl_cfg ctrl_cfg; + __le32 reserved[16]; + struct iwl_context_info_dram dram; +} __packed; /* PERIPH_SCRATCH_S */ + +/* + * struct iwl_prph_info - peripheral information + * @boot_stage_mirror: reflects the value in the Boot Stage CSR register + * @ipc_status_mirror: reflects the value in the IPC Status CSR register + * @sleep_notif: indicates the peripheral sleep status + * @reserved: reserved + */ +struct iwl_prph_info { + __le32 boot_stage_mirror; + __le32 ipc_status_mirror; + __le32 sleep_notif; + __le32 reserved; +} __packed; /* PERIPH_INFO_S */ + +/* + * struct iwl_context_info_gen3 - device INIT configuration + * @version: version of the context information + * @size: size of context information in DWs + * @config: context in which the peripheral would execute - a 
subset of + * capability csr register published by the peripheral + * @prph_info_base_addr: the peripheral information structure start address + * @cr_head_idx_arr_base_addr: the completion ring head index array + * start address + * @tr_tail_idx_arr_base_addr: the transfer ring tail index array + * start address + * @cr_tail_idx_arr_base_addr: the completion ring tail index array + * start address + * @tr_head_idx_arr_base_addr: the transfer ring head index array + * start address + * @cr_idx_arr_size: number of entries in the completion ring index array + * @tr_idx_arr_size: number of entries in the transfer ring index array + * @mtr_base_addr: the message transfer ring start address + * @mcr_base_addr: the message completion ring start address + * @mtr_size: number of entries which the message transfer ring can hold + * @mcr_size: number of entries which the message completion ring can hold + * @mtr_doorbell_vec: the doorbell vector associated with the message + * transfer ring + * @mcr_doorbell_vec: the doorbell vector associated with the message + * completion ring + * @mtr_msi_vec: the MSI which shall be generated by the peripheral after + * completing a transfer descriptor in the message transfer ring + * @mcr_msi_vec: the MSI which shall be generated by the peripheral after + * completing a completion descriptor in the message completion ring + * @mtr_opt_header_size: the size of the optional header in the transfer + * descriptor associated with the message transfer ring in DWs + * @mtr_opt_footer_size: the size of the optional footer in the transfer + * descriptor associated with the message transfer ring in DWs + * @mcr_opt_header_size: the size of the optional header in the completion + * descriptor associated with the message completion ring in DWs + * @mcr_opt_footer_size: the size of the optional footer in the completion + * descriptor associated with the message completion ring in DWs + * @msg_rings_ctrl_flags: message rings control flags + * @prph_info_msi_vec: the MSI which shall be generated by the peripheral + * after updating the Peripheral Information structure + * @prph_scratch_base_addr: the peripheral scratch structure start address + * @prph_scratch_size: the size of the peripheral scratch structure in DWs + * @reserved: reserved + */ +struct iwl_context_info_gen3 { + __le16 version; + __le16 size; + __le32 config; + __le64 prph_info_base_addr; + __le64 cr_head_idx_arr_base_addr; + __le64 tr_tail_idx_arr_base_addr; + __le64 cr_tail_idx_arr_base_addr; + __le64 tr_head_idx_arr_base_addr; + __le16 cr_idx_arr_size; + __le16 tr_idx_arr_size; + __le64 mtr_base_addr; + __le64 mcr_base_addr; + __le16 mtr_size; + __le16 mcr_size; + __le16 mtr_doorbell_vec; + __le16 mcr_doorbell_vec; + __le16 mtr_msi_vec; + __le16 mcr_msi_vec; + u8 mtr_opt_header_size; + u8 mtr_opt_footer_size; + u8 mcr_opt_header_size; + u8 mcr_opt_footer_size; + __le16 msg_rings_ctrl_flags; + __le16 prph_info_msi_vec; + __le64 prph_scratch_base_addr; + __le32 prph_scratch_size; + __le32 reserved; +} __packed; /* IPC_CONTEXT_INFO_S */ + +int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, + const struct fw_img *fw); +void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans); + +#endif /* __iwl_context_info_file_gen3_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h b/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h index b870c0986744..4b6fdf3b15fb 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-context-info.h 
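Illustration only -- not part of this patch: the iwl-context-info-gen3.h header added above only declares the layout; the transport code that consumes it lives elsewhere in the series. A rough sketch of how a transport might fill the PRPH scratch area, using the field and flag names from that header (the allocation scheme, the chosen flags and the rxq_bd_dma variable are illustrative assumptions, not the driver's actual init path):

	struct iwl_prph_scratch *scratch;
	dma_addr_t scratch_pa;

	scratch = dma_alloc_coherent(trans->dev, sizeof(*scratch),
				     &scratch_pa, GFP_KERNEL);
	if (!scratch)
		return -ENOMEM;

	scratch->ctrl_cfg.version.version = cpu_to_le16(1);
	/* sizes in this interface are expressed in DWs */
	scratch->ctrl_cfg.version.size = cpu_to_le16(sizeof(*scratch) / 4);
	scratch->ctrl_cfg.control.control_flags =
		cpu_to_le32(IWL_PRPH_SCRATCH_RB_SIZE_4K |
			    IWL_PRPH_SCRATCH_MTR_MODE |
			    IWL_PRPH_MTR_FORMAT_256B);
	/* point the default RX queue's free-RB circular buffer at the device */
	scratch->ctrl_cfg.rbd_cfg.free_rbd_addr = cpu_to_le64(rxq_bd_dma);

	/* the scratch address is then referenced from the context info proper,
	 * whose DMA address is written to CSR_CTXT_INFO_ADDR before boot is
	 * kicked via CSR_CTXT_INFO_BOOT_CTRL / CSR_AUTO_FUNC_BOOT_ENA */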
@@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -19,6 +20,7 @@ * BSD LICENSE * * Copyright(c) 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -199,5 +201,8 @@ struct iwl_context_info { int iwl_pcie_ctxt_info_init(struct iwl_trans *trans, const struct fw_img *fw); void iwl_pcie_ctxt_info_free(struct iwl_trans *trans); void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans); +int iwl_pcie_init_fw_sec(struct iwl_trans *trans, + const struct fw_img *fw, + struct iwl_context_info_dram *ctxt_dram); #endif /* __iwl_context_info_file_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h index ba971d3946e2..9019de99f077 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-csr.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-csr.h @@ -339,6 +339,9 @@ enum { /* HW_RF CHIP ID */ #define CSR_HW_RF_ID_TYPE_CHIP_ID(_val) (((_val) >> 12) & 0xFFF) +/* HW_RF CHIP STEP */ +#define CSR_HW_RF_STEP(_val) (((_val) >> 8) & 0xF) + /* EEPROM REG */ #define CSR_EEPROM_REG_READ_VALID_MSK (0x00000001) #define CSR_EEPROM_REG_BIT_CMD (0x00000002) @@ -592,6 +595,8 @@ enum msix_fh_int_causes { enum msix_hw_int_causes { MSIX_HW_INT_CAUSES_REG_ALIVE = BIT(0), MSIX_HW_INT_CAUSES_REG_WAKEUP = BIT(1), + MSIX_HW_INT_CAUSES_REG_IPC = BIT(1), + MSIX_HW_INT_CAUSES_REG_SW_ERR_V2 = BIT(5), MSIX_HW_INT_CAUSES_REG_CT_KILL = BIT(6), MSIX_HW_INT_CAUSES_REG_RF_KILL = BIT(7), MSIX_HW_INT_CAUSES_REG_PERIODIC = BIT(8), diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c index c59ce4f8a5ed..c0631255aee7 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-drv.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-drv.c @@ -402,35 +402,6 @@ static int iwl_store_cscheme(struct iwl_fw *fw, const u8 *data, const u32 len) return 0; } -static void iwl_store_gscan_capa(struct iwl_fw *fw, const u8 *data, - const u32 len) -{ - struct iwl_fw_gscan_capabilities *fw_capa = (void *)data; - struct iwl_gscan_capabilities *capa = &fw->gscan_capa; - - capa->max_scan_cache_size = le32_to_cpu(fw_capa->max_scan_cache_size); - capa->max_scan_buckets = le32_to_cpu(fw_capa->max_scan_buckets); - capa->max_ap_cache_per_scan = - le32_to_cpu(fw_capa->max_ap_cache_per_scan); - capa->max_rssi_sample_size = le32_to_cpu(fw_capa->max_rssi_sample_size); - capa->max_scan_reporting_threshold = - le32_to_cpu(fw_capa->max_scan_reporting_threshold); - capa->max_hotlist_aps = le32_to_cpu(fw_capa->max_hotlist_aps); - capa->max_significant_change_aps = - le32_to_cpu(fw_capa->max_significant_change_aps); - capa->max_bssid_history_entries = - le32_to_cpu(fw_capa->max_bssid_history_entries); - capa->max_hotlist_ssids = le32_to_cpu(fw_capa->max_hotlist_ssids); - capa->max_number_epno_networks = - le32_to_cpu(fw_capa->max_number_epno_networks); - capa->max_number_epno_networks_by_ssid = - le32_to_cpu(fw_capa->max_number_epno_networks_by_ssid); - capa->max_number_of_white_listed_ssid = - le32_to_cpu(fw_capa->max_number_of_white_listed_ssid); - capa->max_number_of_black_listed_ssid = - le32_to_cpu(fw_capa->max_number_of_black_listed_ssid); -} - /* * Gets uCode section from tlv. 
*/ @@ -644,7 +615,6 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, u32 build, paging_mem_size; int num_of_cpus; bool usniffer_req = false; - bool gscan_capa = false; if (len < sizeof(*ucode)) { IWL_ERR(drv, "uCode has invalid length: %zd\n", len); @@ -1043,6 +1013,17 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, pieces->dbg_trigger_tlv_len[trigger_id] = tlv_len; break; } + case IWL_UCODE_TLV_FW_DBG_DUMP_LST: { + if (tlv_len != sizeof(u32)) { + IWL_ERR(drv, + "dbg lst mask size incorrect, skip\n"); + break; + } + + drv->fw.dbg_dump_mask = + le32_to_cpup((__le32 *)tlv_data); + break; + } case IWL_UCODE_TLV_SEC_RT_USNIFFER: *usniffer_images = true; iwl_store_ucode_sec(pieces, tlv_data, @@ -1079,16 +1060,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, paging_mem_size; break; case IWL_UCODE_TLV_FW_GSCAN_CAPA: - /* - * Don't return an error in case of a shorter tlv_len - * to enable loading of FW that has an old format - * of GSCAN capabilities TLV. - */ - if (tlv_len < sizeof(struct iwl_fw_gscan_capabilities)) - break; - - iwl_store_gscan_capa(&drv->fw, tlv_data, tlv_len); - gscan_capa = true; + /* ignored */ break; case IWL_UCODE_TLV_FW_MEM_SEG: { struct iwl_fw_dbg_mem_seg_tlv *dbg_mem = @@ -1153,19 +1125,6 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv, return -EINVAL; } - /* - * If ucode advertises that it supports GSCAN but GSCAN - * capabilities TLV is not present, or if it has an old format, - * warn and continue without GSCAN. - */ - if (fw_has_capa(capa, IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT) && - !gscan_capa) { - IWL_DEBUG_INFO(drv, - "GSCAN is supported but capabilities TLV is unavailable\n"); - __clear_bit((__force long)IWL_UCODE_TLV_CAPA_GSCAN_SUPPORT, - capa->_capa); - } - return 0; invalid_tlv_len: @@ -1316,6 +1275,8 @@ static void iwl_req_fw_callback(const struct firmware *ucode_raw, void *context) fw->ucode_capa.standard_phy_calibration_size = IWL_DEFAULT_STANDARD_PHY_CALIBRATE_TBL_SIZE; fw->ucode_capa.n_scan_channels = IWL_DEFAULT_SCAN_CHANNELS; + /* dump all fw memory areas by default */ + fw->dbg_dump_mask = 0xffffffff; pieces = kzalloc(sizeof(*pieces), GFP_KERNEL); if (!pieces) @@ -1787,7 +1748,8 @@ MODULE_PARM_DESC(11n_disable, "disable 11n functionality, bitmap: 1: full, 2: disable agg TX, 4: disable agg RX, 8 enable agg TX"); module_param_named(amsdu_size, iwlwifi_mod_params.amsdu_size, int, 0444); MODULE_PARM_DESC(amsdu_size, - "amsdu size 0: 12K for multi Rx queue devices, 4K for other devices 1:4K 2:8K 3:12K (default 0)"); + "amsdu size 0: 12K for multi Rx queue devices, 2K for 22560 devices, " + "4K for other devices 1:4K 2:8K 3:12K 4: 2K (default 0)"); module_param_named(fw_restart, iwlwifi_mod_params.fw_restart, bool, 0444); MODULE_PARM_DESC(fw_restart, "restart firmware in case of error (default true)"); @@ -1856,3 +1818,7 @@ module_param_named(remove_when_gone, 0444); MODULE_PARM_DESC(remove_when_gone, "Remove dev from PCIe bus if it is deemed inaccessible (default: false)"); + +module_param_named(disable_11ax, iwlwifi_mod_params.disable_11ax, bool, + S_IRUGO); +MODULE_PARM_DESC(disable_11ax, "Disable HE capabilities (default: false)"); diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c index 777f5df8a0c6..a4c96215933b 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-eeprom-parse.c @@ -7,6 +7,7 @@ * * Copyright(c) 2008 - 2014 Intel Corporation. All rights reserved. 
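Illustration only -- not part of this patch: the IWL_UCODE_TLV_FW_DBG_DUMP_LST handling added to iwl-drv.c above lets the firmware file restrict which dump sections the driver collects. A hypothetical firmware build that only wants the FIFO and error-info sections would carry a 4-byte TLV holding this value (the bit positions are the IWL_FW_ERROR_DUMP_* types used in fw/dbg.c):

	__le32 dbg_dump_lst = cpu_to_le32(BIT(IWL_FW_ERROR_DUMP_RXF) |
					  BIT(IWL_FW_ERROR_DUMP_TXF) |
					  BIT(IWL_FW_ERROR_DUMP_ERROR_INFO));

Without the TLV, drv->fw.dbg_dump_mask keeps its 0xffffffff default and everything is dumped as before.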
* Copyright(c) 2015 Intel Mobile Communications GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -18,9 +19,7 @@ * General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA + * along with this program; * * The full GNU General Public License is included in this distribution * in the file called COPYING. @@ -33,6 +32,7 @@ * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2015 Intel Mobile Communications GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -767,7 +767,7 @@ void iwl_init_ht_hw_capab(const struct iwl_cfg *cfg, ht_info->cap |= IEEE80211_HT_CAP_LDPC_CODING; if ((cfg->mq_rx_supported && - iwlwifi_mod_params.amsdu_size != IWL_AMSDU_4K) || + iwlwifi_mod_params.amsdu_size == IWL_AMSDU_DEF) || iwlwifi_mod_params.amsdu_size >= IWL_AMSDU_8K) ht_info->cap |= IEEE80211_HT_CAP_MAX_AMSDU; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h index 11789ffb6512..df0e9ffff706 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-fh.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-fh.h @@ -7,6 +7,7 @@ * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2015 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -18,9 +19,7 @@ * General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA + * along with this program. * * The full GNU General Public License is included in this distribution * in the file called COPYING. @@ -33,6 +32,7 @@ * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2015 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -434,13 +434,15 @@ static inline unsigned int FH_MEM_CBBC_QUEUE(struct iwl_trans *trans, * RXF to DRAM. * Once the RXF-to-DRAM DMA is active, this flag is immediately turned off. 
*/ -#define RFH_GEN_STATUS 0xA09808 +#define RFH_GEN_STATUS 0xA09808 +#define RFH_GEN_STATUS_GEN3 0xA07824 #define RBD_FETCH_IDLE BIT(29) #define SRAM_DMA_IDLE BIT(30) #define RXF_DMA_IDLE BIT(31) /* DMA configuration */ -#define RFH_RXF_DMA_CFG 0xA09820 +#define RFH_RXF_DMA_CFG 0xA09820 +#define RFH_RXF_DMA_CFG_GEN3 0xA07880 /* RB size */ #define RFH_RXF_DMA_RB_SIZE_MASK (0x000F0000) /* bits 16-19 */ #define RFH_RXF_DMA_RB_SIZE_POS 16 @@ -643,10 +645,13 @@ struct iwl_rb_status { #define TFD_QUEUE_SIZE_MAX (256) +#define TFD_QUEUE_SIZE_MAX_GEN3 (65536) /* cb size is the exponent - 3 */ #define TFD_QUEUE_CB_SIZE(x) (ilog2(x) - 3) #define TFD_QUEUE_SIZE_BC_DUP (64) #define TFD_QUEUE_BC_SIZE (TFD_QUEUE_SIZE_MAX + TFD_QUEUE_SIZE_BC_DUP) +#define TFD_QUEUE_BC_SIZE_GEN3 (TFD_QUEUE_SIZE_MAX_GEN3 + \ + TFD_QUEUE_SIZE_BC_DUP) #define IWL_TX_DMA_MASK DMA_BIT_MASK(36) #define IWL_NUM_OF_TBS 20 #define IWL_TFH_NUM_TBS 25 @@ -753,7 +758,7 @@ struct iwl_tfh_tfd { * For devices up to 22000: * @tfd_offset 0-12 - tx command byte count * 12-16 - station index - * For 22000 and on: + * For 22000: * @tfd_offset 0-12 - tx command byte count * 12-13 - number of 64 byte chunks * 14-16 - reserved @@ -762,4 +767,15 @@ struct iwlagn_scd_bc_tbl { __le16 tfd_offset[TFD_QUEUE_BC_SIZE]; } __packed; +/** + * struct iwl_gen3_bc_tbl scheduler byte count table gen3 + * For 22560 and on: + * @tfd_offset: 0-12 - tx command byte count + * 12-13 - number of 64 byte chunks + * 14-16 - reserved + */ +struct iwl_gen3_bc_tbl { + __le16 tfd_offset[TFD_QUEUE_BC_SIZE_GEN3]; +} __packed; + #endif /* !__iwl_fh_h__ */ diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h index a7dd8a8cddf9..97072cf75bca 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-modparams.h @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -17,9 +18,7 @@ * General Public License for more details. * * You should have received a copy of the GNU General Public License - * along with this program; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110, - * USA + * along with this program; * * The full GNU General Public License is included in this distribution * in the file called COPYING. @@ -31,6 +30,7 @@ * BSD LICENSE * * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved. + * Copyright(c) 2018 Intel Corporation * All rights reserved. 
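For scale -- an annotation derived from the iwl-fh.h defines above, not additional driver code -- the 22560/gen3 path raises the per-queue TFD limit from 256 to 65536 entries, which works out to:

	TFD_QUEUE_CB_SIZE(TFD_QUEUE_SIZE_MAX)      = ilog2(256)   - 3 =  5
	TFD_QUEUE_CB_SIZE(TFD_QUEUE_SIZE_MAX_GEN3) = ilog2(65536) - 3 = 13
	sizeof(struct iwlagn_scd_bc_tbl) = 2 * (256 + 64)   =    640 bytes
	sizeof(struct iwl_gen3_bc_tbl)   = 2 * (65536 + 64) = 131200 bytes (~128 KiB)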
* * Redistribution and use in source and binary forms, with or without @@ -90,6 +90,8 @@ enum iwl_amsdu_size { IWL_AMSDU_4K = 1, IWL_AMSDU_8K = 2, IWL_AMSDU_12K = 3, + /* Add 2K at the end to avoid breaking current API */ + IWL_AMSDU_2K = 4, }; enum iwl_uapsd_disable { @@ -144,6 +146,10 @@ struct iwl_mod_params { bool lar_disable; bool fw_monitor; bool disable_11ac; + /** + * @disable_11ax: disable HE capabilities, default = false + */ + bool disable_11ax; bool remove_when_gone; }; diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c index b815ba38dbdb..b4c3a957c102 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c +++ b/drivers/net/wireless/intel/iwlwifi/iwl-nvm-parse.c @@ -430,6 +430,13 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg, else vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895; break; + case IWL_AMSDU_2K: + if (cfg->mq_rx_supported) + vht_cap->cap |= + IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454; + else + WARN(1, "RB size of 2K is not supported by this device\n"); + break; case IWL_AMSDU_4K: vht_cap->cap |= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895; break; @@ -463,6 +470,101 @@ static void iwl_init_vht_hw_capab(const struct iwl_cfg *cfg, vht_cap->vht_mcs.tx_mcs_map = vht_cap->vht_mcs.rx_mcs_map; } +static struct ieee80211_sband_iftype_data iwl_he_capa = { + .types_mask = BIT(NL80211_IFTYPE_STATION) | BIT(NL80211_IFTYPE_AP), + .he_cap = { + .has_he = true, + .he_cap_elem = { + .mac_cap_info[0] = + IEEE80211_HE_MAC_CAP0_HTC_HE, + .mac_cap_info[1] = + IEEE80211_HE_MAC_CAP1_TF_MAC_PAD_DUR_16US | + IEEE80211_HE_MAC_CAP1_MULTI_TID_AGG_QOS_8, + .mac_cap_info[2] = + IEEE80211_HE_MAC_CAP2_32BIT_BA_BITMAP | + IEEE80211_HE_MAC_CAP2_ACK_EN, + .mac_cap_info[3] = + IEEE80211_HE_MAC_CAP3_GRP_ADDR_MULTI_STA_BA_DL_MU | + IEEE80211_HE_MAC_CAP3_MAX_A_AMPDU_LEN_EXP_VHT_2, + .mac_cap_info[4] = IEEE80211_HE_MAC_CAP4_AMDSU_IN_AMPDU, + .phy_cap_info[0] = + IEEE80211_HE_PHY_CAP0_DUAL_BAND | + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_IN_2G | + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_40MHZ_80MHZ_IN_5G | + IEEE80211_HE_PHY_CAP0_CHANNEL_WIDTH_SET_160MHZ_IN_5G, + .phy_cap_info[1] = + IEEE80211_HE_PHY_CAP1_DEVICE_CLASS_A | + IEEE80211_HE_PHY_CAP1_LDPC_CODING_IN_PAYLOAD | + IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_MAX_NSTS, + .phy_cap_info[2] = + IEEE80211_HE_PHY_CAP2_NDP_4x_LTF_AND_3_2US | + IEEE80211_HE_PHY_CAP2_STBC_TX_UNDER_80MHZ | + IEEE80211_HE_PHY_CAP2_STBC_RX_UNDER_80MHZ, + .phy_cap_info[3] = + IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_TX_BPSK | + IEEE80211_HE_PHY_CAP3_DCM_MAX_TX_NSS_1 | + IEEE80211_HE_PHY_CAP3_DCM_MAX_CONST_RX_BPSK | + IEEE80211_HE_PHY_CAP3_DCM_MAX_RX_NSS_1, + .phy_cap_info[4] = + IEEE80211_HE_PHY_CAP4_SU_BEAMFORMEE | + IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_ABOVE_80MHZ_8 | + IEEE80211_HE_PHY_CAP4_BEAMFORMEE_MAX_STS_UNDER_80MHZ_8, + .phy_cap_info[5] = + IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_UNDER_80MHZ_2 | + IEEE80211_HE_PHY_CAP5_BEAMFORMEE_NUM_SND_DIM_ABOVE_80MHZ_2, + .phy_cap_info[6] = + IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT, + .phy_cap_info[7] = + IEEE80211_HE_PHY_CAP7_POWER_BOOST_FACTOR_AR | + IEEE80211_HE_PHY_CAP7_HE_SU_MU_PPDU_4XLTF_AND_08_US_GI | + IEEE80211_HE_PHY_CAP7_MAX_NC_7, + .phy_cap_info[8] = + IEEE80211_HE_PHY_CAP8_HE_ER_SU_PPDU_4XLTF_AND_08_US_GI | + IEEE80211_HE_PHY_CAP8_20MHZ_IN_40MHZ_HE_PPDU_IN_2G | + IEEE80211_HE_PHY_CAP8_20MHZ_IN_160MHZ_HE_PPDU | + IEEE80211_HE_PHY_CAP8_80MHZ_IN_160MHZ_HE_PPDU, + }, + /* + * Set default Tx/Rx HE MCS NSS Support field. 
Indicate support + * for up to 2 spatial streams and all MCS, without any special + * cases + */ + .he_mcs_nss_supp = { + .rx_mcs_80 = cpu_to_le16(0xfffa), + .tx_mcs_80 = cpu_to_le16(0xfffa), + .rx_mcs_160 = cpu_to_le16(0xfffa), + .tx_mcs_160 = cpu_to_le16(0xfffa), + .rx_mcs_80p80 = cpu_to_le16(0xffff), + .tx_mcs_80p80 = cpu_to_le16(0xffff), + }, + /* + * Set default PPE thresholds, with PPET16 set to 0, PPET8 set + * to 7 + */ + .ppe_thres = {0x61, 0x1c, 0xc7, 0x71}, + }, +}; + +static void iwl_init_he_hw_capab(struct ieee80211_supported_band *sband, + u8 tx_chains, u8 rx_chains) +{ + if (sband->band == NL80211_BAND_2GHZ || + sband->band == NL80211_BAND_5GHZ) + sband->iftype_data = &iwl_he_capa; + else + return; + + sband->n_iftype_data = 1; + + /* If not 2x2, we need to indicate 1x1 in the Midamble RX Max NSTS */ + if ((tx_chains & rx_chains) != ANT_AB) { + iwl_he_capa.he_cap.he_cap_elem.phy_cap_info[1] &= + ~IEEE80211_HE_PHY_CAP1_MIDAMBLE_RX_MAX_NSTS; + iwl_he_capa.he_cap.he_cap_elem.phy_cap_info[2] &= + ~IEEE80211_HE_PHY_CAP2_MIDAMBLE_RX_MAX_NSTS; + } +} + static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg, struct iwl_nvm_data *data, const __le16 *nvm_ch_flags, u8 tx_chains, @@ -483,6 +585,9 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg, iwl_init_ht_hw_capab(cfg, data, &sband->ht_cap, NL80211_BAND_2GHZ, tx_chains, rx_chains); + if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax) + iwl_init_he_hw_capab(sband, tx_chains, rx_chains); + sband = &data->bands[NL80211_BAND_5GHZ]; sband->band = NL80211_BAND_5GHZ; sband->bitrates = &iwl_cfg80211_rates[RATES_52_OFFS]; @@ -495,6 +600,9 @@ static void iwl_init_sbands(struct device *dev, const struct iwl_cfg *cfg, iwl_init_vht_hw_capab(cfg, data, &sband->vht_cap, tx_chains, rx_chains); + if (data->sku_cap_11ax_enable && !iwlwifi_mod_params.disable_11ax) + iwl_init_he_hw_capab(sband, tx_chains, rx_chains); + if (n_channels != n_used) IWL_ERR_DEV(dev, "NVM: used only %d of %d channels\n", n_used, n_channels); @@ -1293,6 +1401,8 @@ struct iwl_nvm_data *iwl_get_nvm(struct iwl_trans *trans, !!(mac_flags & NVM_MAC_SKU_FLAGS_802_11AC_ENABLED); nvm->sku_cap_11n_enable = !!(mac_flags & NVM_MAC_SKU_FLAGS_802_11N_ENABLED); + nvm->sku_cap_11ax_enable = + !!(mac_flags & NVM_MAC_SKU_FLAGS_802_11AX_ENABLED); nvm->sku_cap_band_24ghz_enable = !!(mac_flags & NVM_MAC_SKU_FLAGS_BAND_2_4_ENABLED); nvm->sku_cap_band_52ghz_enable = diff --git a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h index 1b9c627ee34d..279dd7b7a3fb 100644 --- a/drivers/net/wireless/intel/iwlwifi/iwl-trans.h +++ b/drivers/net/wireless/intel/iwlwifi/iwl-trans.h @@ -350,6 +350,8 @@ static inline int iwl_trans_get_rb_size_order(enum iwl_amsdu_size rb_size) { switch (rb_size) { + case IWL_AMSDU_2K: + return get_order(2 * 1024); case IWL_AMSDU_4K: return get_order(4 * 1024); case IWL_AMSDU_8K: @@ -438,6 +440,20 @@ struct iwl_trans_txq_scd_cfg { }; /** + * struct iwl_trans_rxq_dma_data - RX queue DMA data + * @fr_bd_cb: DMA address of free BD cyclic buffer + * @fr_bd_wid: Initial write index of the free BD cyclic buffer + * @urbd_stts_wrptr: DMA address of urbd_stts_wrptr + * @ur_bd_cb: DMA address of used BD cyclic buffer + */ +struct iwl_trans_rxq_dma_data { + u64 fr_bd_cb; + u32 fr_bd_wid; + u64 urbd_stts_wrptr; + u64 ur_bd_cb; +}; + +/** * struct iwl_trans_ops - transport specific operations * * All the handlers MUST be implemented @@ -557,6 +573,8 @@ struct iwl_trans_ops { int 
cmd_id, int size, unsigned int queue_wdg_timeout); void (*txq_free)(struct iwl_trans *trans, int queue); + int (*rxq_dma_data)(struct iwl_trans *trans, int queue, + struct iwl_trans_rxq_dma_data *data); void (*txq_set_shared_mode)(struct iwl_trans *trans, u32 txq_id, bool shared); @@ -753,6 +771,7 @@ struct iwl_trans { const struct iwl_fw_dbg_dest_tlv_v1 *dbg_dest_tlv; const struct iwl_fw_dbg_conf_tlv *dbg_conf_tlv[FW_DBG_CONF_MAX]; struct iwl_fw_dbg_trigger_tlv * const *dbg_trigger_tlv; + u32 dbg_dump_mask; u8 dbg_dest_reg_num; enum iwl_plat_pm_mode system_pm_mode; @@ -945,6 +964,16 @@ iwl_trans_txq_enable_cfg(struct iwl_trans *trans, int queue, u16 ssn, cfg, queue_wdg_timeout); } +static inline int +iwl_trans_get_rxq_dma_data(struct iwl_trans *trans, int queue, + struct iwl_trans_rxq_dma_data *data) +{ + if (WARN_ON_ONCE(!trans->ops->rxq_dma_data)) + return -ENOTSUPP; + + return trans->ops->rxq_dma_data(trans, queue, data); +} + static inline void iwl_trans_txq_free(struct iwl_trans *trans, int queue) { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c index 3fcf489f3120..79bdae994822 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/d3.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/d3.c @@ -1037,6 +1037,13 @@ static int __iwl_mvm_suspend(struct ieee80211_hw *hw, cpu_to_le32(IWL_WAKEUP_D3_CONFIG_FW_ERROR); #endif + /* + * TODO: this is needed because the firmware is not stopping + * the recording automatically before entering D3. This can + * be removed once the FW starts doing that. + */ + iwl_fw_dbg_stop_recording(&mvm->fwrt); + /* must be last -- this switches firmware state */ ret = iwl_mvm_send_cmd(mvm, &d3_cfg_cmd); if (ret) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c index 1c4178f20441..05b77419953c 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/debugfs.c @@ -1150,6 +1150,10 @@ static ssize_t iwl_dbgfs_inject_packet_write(struct iwl_mvm *mvm, struct iwl_rx_mpdu_desc *desc; int bin_len = count / 2; int ret = -EINVAL; + size_t mpdu_cmd_hdr_size = + (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) ? 
+ sizeof(struct iwl_rx_mpdu_desc) : + IWL_RX_DESC_SIZE_V1; if (!iwl_mvm_firmware_running(mvm)) return -EIO; @@ -1168,7 +1172,7 @@ static ssize_t iwl_dbgfs_inject_packet_write(struct iwl_mvm *mvm, goto out; /* avoid invalid memory access */ - if (bin_len < sizeof(*pkt) + sizeof(*desc)) + if (bin_len < sizeof(*pkt) + mpdu_cmd_hdr_size) goto out; /* check this is RX packet */ @@ -1179,7 +1183,7 @@ static ssize_t iwl_dbgfs_inject_packet_write(struct iwl_mvm *mvm, /* check the length in metadata matches actual received length */ desc = (void *)pkt->data; if (le16_to_cpu(desc->mpdu_len) != - (bin_len - sizeof(*desc) - sizeof(*pkt))) + (bin_len - mpdu_cmd_hdr_size - sizeof(*pkt))) goto out; local_bh_disable(); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c index 866c91c923be..6bb1a99a197a 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/fw.c @@ -130,6 +130,41 @@ static int iwl_send_rss_cfg_cmd(struct iwl_mvm *mvm) return iwl_mvm_send_cmd_pdu(mvm, RSS_CONFIG_CMD, 0, sizeof(cmd), &cmd); } +static int iwl_configure_rxq(struct iwl_mvm *mvm) +{ + int i, num_queues, size; + struct iwl_rfh_queue_config *cmd; + + /* Do not configure default queue, it is configured via context info */ + num_queues = mvm->trans->num_rx_queues - 1; + + size = sizeof(*cmd) + num_queues * sizeof(struct iwl_rfh_queue_data); + + cmd = kzalloc(size, GFP_KERNEL); + if (!cmd) + return -ENOMEM; + + cmd->num_queues = num_queues; + + for (i = 0; i < num_queues; i++) { + struct iwl_trans_rxq_dma_data data; + + cmd->data[i].q_num = i + 1; + iwl_trans_get_rxq_dma_data(mvm->trans, i + 1, &data); + + cmd->data[i].fr_bd_cb = cpu_to_le64(data.fr_bd_cb); + cmd->data[i].urbd_stts_wrptr = + cpu_to_le64(data.urbd_stts_wrptr); + cmd->data[i].ur_bd_cb = cpu_to_le64(data.ur_bd_cb); + cmd->data[i].fr_bd_wid = cpu_to_le32(data.fr_bd_wid); + } + + return iwl_mvm_send_cmd_pdu(mvm, + WIDE_ID(DATA_PATH_GROUP, + RFH_QUEUE_CONFIG_CMD), + 0, size, cmd); +} + static int iwl_mvm_send_dqa_cmd(struct iwl_mvm *mvm) { struct iwl_dqa_enable_cmd dqa_cmd = { @@ -301,7 +336,7 @@ static int iwl_mvm_load_ucode_wait_alive(struct iwl_mvm *mvm, if (ret) { struct iwl_trans *trans = mvm->trans; - if (trans->cfg->device_family == IWL_DEVICE_FAMILY_22000) + if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000) IWL_ERR(mvm, "SecBoot CPU1 Status: 0x%x, CPU2 Status: 0x%x\n", iwl_read_prph(trans, UMAG_SB_CPU_1_STATUS), @@ -1007,9 +1042,16 @@ int iwl_mvm_up(struct iwl_mvm *mvm) goto error; /* Init RSS configuration */ - /* TODO - remove 22000 disablement when we have RXQ config API */ - if (iwl_mvm_has_new_rx_api(mvm) && - mvm->trans->cfg->device_family != IWL_DEVICE_FAMILY_22000) { + if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22000) { + ret = iwl_configure_rxq(mvm); + if (ret) { + IWL_ERR(mvm, "Failed to configure RX queues: %d\n", + ret); + goto error; + } + } + + if (iwl_mvm_has_new_rx_api(mvm)) { ret = iwl_send_rss_cfg_cmd(mvm); if (ret) { IWL_ERR(mvm, "Failed to configure RSS queues: %d\n", diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c index 8ba16fc24e3a..b3fd20502abb 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac-ctxt.c @@ -780,6 +780,10 @@ static int iwl_mvm_mac_ctxt_cmd_sta(struct iwl_mvm *mvm, if (vif->probe_req_reg && vif->bss_conf.assoc && vif->p2p) cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_PROBE_REQUEST); + 
if (vif->bss_conf.assoc && vif->bss_conf.he_support && + !iwlwifi_mod_params.disable_11ax) + cmd.filter_flags |= cpu_to_le32(MAC_FILTER_IN_11AX); + return iwl_mvm_mac_ctxt_send_cmd(mvm, &cmd); } diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c index a6e072234398..b15b0d84bb7e 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mac80211.c @@ -36,6 +36,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -914,7 +915,7 @@ static int iwl_mvm_mac_ampdu_action(struct ieee80211_hw *hw, enum ieee80211_ampdu_mlme_action action = params->action; u16 tid = params->tid; u16 *ssn = ¶ms->ssn; - u8 buf_size = params->buf_size; + u16 buf_size = params->buf_size; bool amsdu = params->amsdu; u16 timeout = params->timeout; @@ -1897,6 +1898,194 @@ void iwl_mvm_mu_mimo_grp_notif(struct iwl_mvm *mvm, iwl_mvm_mu_mimo_iface_iterator, notif); } +static u8 iwl_mvm_he_get_ppe_val(u8 *ppe, u8 ppe_pos_bit) +{ + u8 byte_num = ppe_pos_bit / 8; + u8 bit_num = ppe_pos_bit % 8; + u8 residue_bits; + u8 res; + + if (bit_num <= 5) + return (ppe[byte_num] >> bit_num) & + (BIT(IEEE80211_PPE_THRES_INFO_PPET_SIZE) - 1); + + /* + * If bit_num > 5, we have to combine bits with next byte. + * Calculate how many bits we need to take from current byte (called + * here "residue_bits"), and add them to bits from next byte. + */ + + residue_bits = 8 - bit_num; + + res = (ppe[byte_num + 1] & + (BIT(IEEE80211_PPE_THRES_INFO_PPET_SIZE - residue_bits) - 1)) << + residue_bits; + res += (ppe[byte_num] >> bit_num) & (BIT(residue_bits) - 1); + + return res; +} + +static void iwl_mvm_cfg_he_sta(struct iwl_mvm *mvm, + struct ieee80211_vif *vif, u8 sta_id) +{ + struct iwl_mvm_vif *mvmvif = iwl_mvm_vif_from_mac80211(vif); + struct iwl_he_sta_context_cmd sta_ctxt_cmd = { + .sta_id = sta_id, + .tid_limit = IWL_MAX_TID_COUNT, + .bss_color = vif->bss_conf.bss_color, + .htc_trig_based_pkt_ext = vif->bss_conf.htc_trig_based_pkt_ext, + .frame_time_rts_th = + cpu_to_le16(vif->bss_conf.frame_time_rts_th), + }; + struct ieee80211_sta *sta; + u32 flags; + int i; + + rcu_read_lock(); + + sta = rcu_dereference(mvm->fw_id_to_mac_id[sta_ctxt_cmd.sta_id]); + if (IS_ERR(sta)) { + rcu_read_unlock(); + WARN(1, "Can't find STA to configure HE\n"); + return; + } + + if (!sta->he_cap.has_he) { + rcu_read_unlock(); + return; + } + + flags = 0; + + /* HTC flags */ + if (sta->he_cap.he_cap_elem.mac_cap_info[0] & + IEEE80211_HE_MAC_CAP0_HTC_HE) + sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_SUPPORT); + if ((sta->he_cap.he_cap_elem.mac_cap_info[1] & + IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION) || + (sta->he_cap.he_cap_elem.mac_cap_info[2] & + IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION)) { + u8 link_adap = + ((sta->he_cap.he_cap_elem.mac_cap_info[2] & + IEEE80211_HE_MAC_CAP2_LINK_ADAPTATION) << 1) + + (sta->he_cap.he_cap_elem.mac_cap_info[1] & + IEEE80211_HE_MAC_CAP1_LINK_ADAPTATION); + + if (link_adap == 2) + sta_ctxt_cmd.htc_flags |= + cpu_to_le32(IWL_HE_HTC_LINK_ADAP_UNSOLICITED); + else if (link_adap == 3) + sta_ctxt_cmd.htc_flags |= + cpu_to_le32(IWL_HE_HTC_LINK_ADAP_BOTH); + } + if (sta->he_cap.he_cap_elem.mac_cap_info[2] & + IEEE80211_HE_MAC_CAP2_UL_MU_RESP_SCHED) + sta_ctxt_cmd.htc_flags |= + 
cpu_to_le32(IWL_HE_HTC_UL_MU_RESP_SCHED); + if (sta->he_cap.he_cap_elem.mac_cap_info[2] & IEEE80211_HE_MAC_CAP2_BSR) + sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_BSR_SUPP); + if (sta->he_cap.he_cap_elem.mac_cap_info[3] & + IEEE80211_HE_MAC_CAP3_OMI_CONTROL) + sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_OMI_SUPP); + if (sta->he_cap.he_cap_elem.mac_cap_info[4] & IEEE80211_HE_MAC_CAP4_BQR) + sta_ctxt_cmd.htc_flags |= cpu_to_le32(IWL_HE_HTC_BQR_SUPP); + + /* If PPE Thresholds exist, parse them into a FW-familiar format */ + if (sta->he_cap.he_cap_elem.phy_cap_info[6] & + IEEE80211_HE_PHY_CAP6_PPE_THRESHOLD_PRESENT) { + u8 nss = (sta->he_cap.ppe_thres[0] & + IEEE80211_PPE_THRES_NSS_MASK) + 1; + u8 ru_index_bitmap = + (sta->he_cap.ppe_thres[0] & + IEEE80211_PPE_THRES_RU_INDEX_BITMASK_MASK) >> + IEEE80211_PPE_THRES_RU_INDEX_BITMASK_POS; + u8 *ppe = &sta->he_cap.ppe_thres[0]; + u8 ppe_pos_bit = 7; /* Starting after PPE header */ + + /* + * FW currently supports only nss == MAX_HE_SUPP_NSS + * + * If nss > MAX: we can ignore values we don't support + * If nss < MAX: we can set zeros in other streams + */ + if (nss > MAX_HE_SUPP_NSS) { + IWL_INFO(mvm, "Got NSS = %d - trimming to %d\n", nss, + MAX_HE_SUPP_NSS); + nss = MAX_HE_SUPP_NSS; + } + + for (i = 0; i < nss; i++) { + u8 ru_index_tmp = ru_index_bitmap << 1; + u8 bw; + + for (bw = 0; bw < MAX_HE_CHANNEL_BW_INDX; bw++) { + ru_index_tmp >>= 1; + if (!(ru_index_tmp & 1)) + continue; + + sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw][1] = + iwl_mvm_he_get_ppe_val(ppe, + ppe_pos_bit); + ppe_pos_bit += + IEEE80211_PPE_THRES_INFO_PPET_SIZE; + sta_ctxt_cmd.pkt_ext.pkt_ext_qam_th[i][bw][0] = + iwl_mvm_he_get_ppe_val(ppe, + ppe_pos_bit); + ppe_pos_bit += + IEEE80211_PPE_THRES_INFO_PPET_SIZE; + } + } + + flags |= STA_CTXT_HE_PACKET_EXT; + } + rcu_read_unlock(); + + /* Mark MU EDCA as enabled, unless none detected on some AC */ + flags |= STA_CTXT_HE_MU_EDCA_CW; + for (i = 0; i < AC_NUM; i++) { + struct ieee80211_he_mu_edca_param_ac_rec *mu_edca = + &mvmvif->queue_params[i].mu_edca_param_rec; + + if (!mvmvif->queue_params[i].mu_edca) { + flags &= ~STA_CTXT_HE_MU_EDCA_CW; + break; + } + + sta_ctxt_cmd.trig_based_txf[i].cwmin = + cpu_to_le16(mu_edca->ecw_min_max & 0xf); + sta_ctxt_cmd.trig_based_txf[i].cwmax = + cpu_to_le16((mu_edca->ecw_min_max & 0xf0) >> 4); + sta_ctxt_cmd.trig_based_txf[i].aifsn = + cpu_to_le16(mu_edca->aifsn); + sta_ctxt_cmd.trig_based_txf[i].mu_time = + cpu_to_le16(mu_edca->mu_edca_timer); + } + + if (vif->bss_conf.multi_sta_back_32bit) + flags |= STA_CTXT_HE_32BIT_BA_BITMAP; + + if (vif->bss_conf.ack_enabled) + flags |= STA_CTXT_HE_ACK_ENABLED; + + if (vif->bss_conf.uora_exists) { + flags |= STA_CTXT_HE_TRIG_RND_ALLOC; + + sta_ctxt_cmd.rand_alloc_ecwmin = + vif->bss_conf.uora_ocw_range & 0x7; + sta_ctxt_cmd.rand_alloc_ecwmax = + (vif->bss_conf.uora_ocw_range >> 3) & 0x7; + } + + /* TODO: support Multi BSSID IE */ + + sta_ctxt_cmd.flags = cpu_to_le32(flags); + + if (iwl_mvm_send_cmd_pdu(mvm, iwl_cmd_id(STA_HE_CTXT_CMD, + DATA_PATH_GROUP, 0), + 0, sizeof(sta_ctxt_cmd), &sta_ctxt_cmd)) + IWL_ERR(mvm, "Failed to config FW to work HE!\n"); +} + static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_bss_conf *bss_conf, @@ -1910,8 +2099,13 @@ static void iwl_mvm_bss_info_changed_station(struct iwl_mvm *mvm, * beacon interval, which was not known when the station interface was * added. 
*/ - if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc) + if (changes & BSS_CHANGED_ASSOC && bss_conf->assoc) { + if (vif->bss_conf.he_support && + !iwlwifi_mod_params.disable_11ax) + iwl_mvm_cfg_he_sta(mvm, vif, mvmvif->ap_sta_id); + iwl_mvm_mac_ctxt_recalc_tsf_id(mvm, vif); + } /* * If we're not associated yet, take the (new) BSSID before associating @@ -4216,7 +4410,7 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw, if (mvmsta->avg_energy) { sinfo->signal_avg = mvmsta->avg_energy; - sinfo->filled |= BIT(NL80211_STA_INFO_SIGNAL_AVG); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_SIGNAL_AVG); } if (!fw_has_capa(&mvm->fw->ucode_capa, @@ -4240,11 +4434,11 @@ static void iwl_mvm_mac_sta_statistics(struct ieee80211_hw *hw, sinfo->rx_beacon = mvmvif->beacon_stats.num_beacons + mvmvif->beacon_stats.accu_num_beacons; - sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_RX); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_RX); if (mvmvif->beacon_stats.avg_signal) { /* firmware only reports a value after RXing a few beacons */ sinfo->rx_beacon_signal_avg = mvmvif->beacon_stats.avg_signal; - sinfo->filled |= BIT(NL80211_STA_INFO_BEACON_SIGNAL_AVG); + sinfo->filled |= BIT_ULL(NL80211_STA_INFO_BEACON_SIGNAL_AVG); } unlock: mutex_unlock(&mvm->mutex); @@ -4364,13 +4558,6 @@ void iwl_mvm_sync_rx_queues_internal(struct iwl_mvm *mvm, atomic_set(&mvm->queue_sync_counter, mvm->trans->num_rx_queues); - /* TODO - remove this when we have RXQ config API */ - if (mvm->trans->cfg->device_family == IWL_DEVICE_FAMILY_22000) { - qmask = BIT(0); - if (notif->sync) - atomic_set(&mvm->queue_sync_counter, 1); - } - ret = iwl_mvm_notify_rx_queue(mvm, qmask, (u8 *)notif, size); if (ret) { IWL_ERR(mvm, "Failed to trigger RX queues sync (%d)\n", ret); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h index 6a4ba160c59e..b3987a0a7018 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/mvm.h @@ -654,7 +654,7 @@ struct iwl_mvm_tcm { struct iwl_mvm_reorder_buffer { u16 head_sn; u16 num_stored; - u8 buf_size; + u16 buf_size; int queue; u16 last_amsdu; u8 last_sub_index; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c index ff1e518096c5..0e26619fb330 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/ops.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/ops.c @@ -448,6 +448,8 @@ static const struct iwl_hcmd_names iwl_mvm_data_path_names[] = { HCMD_NAME(DQA_ENABLE_CMD), HCMD_NAME(UPDATE_MU_GROUPS_CMD), HCMD_NAME(TRIGGER_RX_QUEUES_NOTIF_CMD), + HCMD_NAME(STA_HE_CTXT_CMD), + HCMD_NAME(RFH_QUEUE_CONFIG_CMD), HCMD_NAME(STA_PM_NOTIF), HCMD_NAME(MU_GROUP_MGMT_NOTIF), HCMD_NAME(RX_QUEUES_NOTIFICATION), @@ -620,7 +622,11 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, if (iwl_mvm_has_new_rx_api(mvm)) { op_mode->ops = &iwl_mvm_ops_mq; - trans->rx_mpdu_cmd_hdr_size = sizeof(struct iwl_rx_mpdu_desc); + trans->rx_mpdu_cmd_hdr_size = + (trans->cfg->device_family >= + IWL_DEVICE_FAMILY_22560) ? 
+ sizeof(struct iwl_rx_mpdu_desc) : + IWL_RX_DESC_SIZE_V1; } else { op_mode->ops = &iwl_mvm_ops; trans->rx_mpdu_cmd_hdr_size = @@ -703,11 +709,17 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, } /* the hardware splits the A-MSDU */ - if (mvm->cfg->mq_rx_supported) + if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) { + trans_cfg.rx_buf_size = IWL_AMSDU_2K; + /* TODO: remove when balanced power mode is fw supported */ + iwlmvm_mod_params.power_scheme = IWL_POWER_SCHEME_CAM; + } else if (mvm->cfg->mq_rx_supported) { trans_cfg.rx_buf_size = IWL_AMSDU_4K; + } trans->wide_cmd_header = true; - trans_cfg.bc_table_dword = true; + trans_cfg.bc_table_dword = + mvm->trans->cfg->device_family < IWL_DEVICE_FAMILY_22560; trans_cfg.command_groups = iwl_mvm_groups; trans_cfg.command_groups_size = ARRAY_SIZE(iwl_mvm_groups); @@ -738,6 +750,7 @@ iwl_op_mode_mvm_start(struct iwl_trans *trans, const struct iwl_cfg *cfg, memcpy(trans->dbg_conf_tlv, mvm->fw->dbg_conf_tlv, sizeof(trans->dbg_conf_tlv)); trans->dbg_trigger_tlv = mvm->fw->dbg_trigger_tlv; + trans->dbg_dump_mask = mvm->fw->dbg_dump_mask; trans->iml = mvm->fw->iml; trans->iml_len = mvm->fw->iml_len; @@ -1003,10 +1016,8 @@ static void iwl_mvm_rx_common(struct iwl_mvm *mvm, list_add_tail(&entry->list, &mvm->async_handlers_list); spin_unlock(&mvm->async_handlers_lock); schedule_work(&mvm->async_handlers_wk); - return; + break; } - - iwl_fwrt_handle_notification(&mvm->fwrt, rxb); } static void iwl_mvm_rx(struct iwl_op_mode *op_mode, diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c index b8b2b819e8e7..8169d1450b3b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs-fw.c @@ -183,6 +183,43 @@ rs_fw_vht_set_enabled_rates(const struct ieee80211_sta *sta, } } +static u16 rs_fw_he_ieee80211_mcs_to_rs_mcs(u16 mcs) +{ + switch (mcs) { + case IEEE80211_HE_MCS_SUPPORT_0_7: + return BIT(IWL_TLC_MNG_HT_RATE_MCS7 + 1) - 1; + case IEEE80211_HE_MCS_SUPPORT_0_9: + return BIT(IWL_TLC_MNG_HT_RATE_MCS9 + 1) - 1; + case IEEE80211_HE_MCS_SUPPORT_0_11: + return BIT(IWL_TLC_MNG_HT_RATE_MCS11 + 1) - 1; + case IEEE80211_HE_MCS_NOT_SUPPORTED: + return 0; + } + + WARN(1, "invalid HE MCS %d\n", mcs); + return 0; +} + +static void +rs_fw_he_set_enabled_rates(const struct ieee80211_sta *sta, + const struct ieee80211_sta_he_cap *he_cap, + struct iwl_tlc_config_cmd *cmd) +{ + u16 mcs_160 = le16_to_cpu(sta->he_cap.he_mcs_nss_supp.rx_mcs_160); + u16 mcs_80 = le16_to_cpu(sta->he_cap.he_mcs_nss_supp.rx_mcs_80); + int i; + + for (i = 0; i < sta->rx_nss && i < MAX_NSS; i++) { + u16 _mcs_160 = (mcs_160 >> (2 * i)) & 0x3; + u16 _mcs_80 = (mcs_80 >> (2 * i)) & 0x3; + + cmd->ht_rates[i][0] = + cpu_to_le16(rs_fw_he_ieee80211_mcs_to_rs_mcs(_mcs_80)); + cmd->ht_rates[i][1] = + cpu_to_le16(rs_fw_he_ieee80211_mcs_to_rs_mcs(_mcs_160)); + } +} + static void rs_fw_set_supp_rates(struct ieee80211_sta *sta, struct ieee80211_supported_band *sband, struct iwl_tlc_config_cmd *cmd) @@ -192,6 +229,7 @@ static void rs_fw_set_supp_rates(struct ieee80211_sta *sta, unsigned long supp; /* must be unsigned long for for_each_set_bit */ const struct ieee80211_sta_ht_cap *ht_cap = &sta->ht_cap; const struct ieee80211_sta_vht_cap *vht_cap = &sta->vht_cap; + const struct ieee80211_sta_he_cap *he_cap = &sta->he_cap; /* non HT rates */ supp = 0; @@ -202,7 +240,11 @@ static void rs_fw_set_supp_rates(struct ieee80211_sta *sta, cmd->non_ht_rates = cpu_to_le16(supp); 
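/*
 * Descriptive note (not part of the original patch): the mode selection
 * that follows prefers HE over VHT over HT. When the peer advertises HE,
 * rs_fw_he_set_enabled_rates() above walks the HE RX MCS map (two bits per
 * spatial stream, separately for 80 MHz and 160 MHz) and expands each
 * 2-bit field into a contiguous MCS bitmap via
 * rs_fw_he_ieee80211_mcs_to_rs_mcs() before it is placed into the
 * iwl_tlc_config_cmd; e.g. a field value of 1 (MCS 0-9 supported) becomes
 * the bitmap BIT(IWL_TLC_MNG_HT_RATE_MCS9 + 1) - 1 = 0x3ff.
 */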
cmd->mode = IWL_TLC_MNG_MODE_NON_HT; - if (vht_cap && vht_cap->vht_supported) { + /* HT/VHT rates */ + if (he_cap && he_cap->has_he) { + cmd->mode = IWL_TLC_MNG_MODE_HE; + rs_fw_he_set_enabled_rates(sta, he_cap, cmd); + } else if (vht_cap && vht_cap->vht_supported) { cmd->mode = IWL_TLC_MNG_MODE_VHT; rs_fw_vht_set_enabled_rates(sta, vht_cap, cmd); } else if (ht_cap && ht_cap->ht_supported) { diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c index 642da10b0b7f..30cfd7d50bc9 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.c @@ -363,7 +363,8 @@ static int iwl_hwrate_to_plcp_idx(u32 rate_n_flags) idx += 1; if ((idx >= IWL_FIRST_HT_RATE) && (idx <= IWL_LAST_HT_RATE)) return idx; - } else if (rate_n_flags & RATE_MCS_VHT_MSK) { + } else if (rate_n_flags & RATE_MCS_VHT_MSK || + rate_n_flags & RATE_MCS_HE_MSK) { idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK; idx += IWL_RATE_MCS_0_INDEX; @@ -372,6 +373,9 @@ static int iwl_hwrate_to_plcp_idx(u32 rate_n_flags) idx++; if ((idx >= IWL_FIRST_VHT_RATE) && (idx <= IWL_LAST_VHT_RATE)) return idx; + if ((rate_n_flags & RATE_MCS_HE_MSK) && + (idx <= IWL_LAST_HE_RATE)) + return idx; } else { /* legacy rate format, search for match in table */ @@ -516,6 +520,8 @@ static const char *rs_pretty_lq_type(enum iwl_table_type type) [LQ_HT_MIMO2] = "HT MIMO", [LQ_VHT_SISO] = "VHT SISO", [LQ_VHT_MIMO2] = "VHT MIMO", + [LQ_HE_SISO] = "HE SISO", + [LQ_HE_MIMO2] = "HE MIMO", }; if (type < LQ_NONE || type >= LQ_MAX) @@ -900,7 +906,8 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate, /* Legacy */ if (!(ucode_rate & RATE_MCS_HT_MSK) && - !(ucode_rate & RATE_MCS_VHT_MSK)) { + !(ucode_rate & RATE_MCS_VHT_MSK) && + !(ucode_rate & RATE_MCS_HE_MSK)) { if (num_of_ant == 1) { if (band == NL80211_BAND_5GHZ) rate->type = LQ_LEGACY_A; @@ -911,7 +918,7 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate, return 0; } - /* HT or VHT */ + /* HT, VHT or HE */ if (ucode_rate & RATE_MCS_SGI_MSK) rate->sgi = true; if (ucode_rate & RATE_MCS_LDPC_MSK) @@ -953,10 +960,24 @@ static int rs_rate_from_ucode_rate(const u32 ucode_rate, } else { WARN_ON_ONCE(1); } + } else if (ucode_rate & RATE_MCS_HE_MSK) { + nss = ((ucode_rate & RATE_VHT_MCS_NSS_MSK) >> + RATE_VHT_MCS_NSS_POS) + 1; + + if (nss == 1) { + rate->type = LQ_HE_SISO; + WARN_ONCE(!rate->stbc && !rate->bfer && num_of_ant != 1, + "stbc %d bfer %d", rate->stbc, rate->bfer); + } else if (nss == 2) { + rate->type = LQ_HE_MIMO2; + WARN_ON_ONCE(num_of_ant != 2); + } else { + WARN_ON_ONCE(1); + } } WARN_ON_ONCE(rate->bw == RATE_MCS_CHAN_WIDTH_80 && - !is_vht(rate)); + !is_he(rate) && !is_vht(rate)); return 0; } @@ -3606,7 +3627,8 @@ int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate) u8 ant = (rate & RATE_MCS_ANT_ABC_MSK) >> RATE_MCS_ANT_POS; if (!(rate & RATE_MCS_HT_MSK) && - !(rate & RATE_MCS_VHT_MSK)) { + !(rate & RATE_MCS_VHT_MSK) && + !(rate & RATE_MCS_HE_MSK)) { int index = iwl_hwrate_to_plcp_idx(rate); return scnprintf(buf, bufsz, "Legacy | ANT: %s Rate: %s Mbps\n", @@ -3625,6 +3647,11 @@ int rs_pretty_print_rate(char *buf, int bufsz, const u32 rate) mcs = rate & RATE_HT_MCS_INDEX_MSK; nss = ((rate & RATE_HT_MCS_NSS_MSK) >> RATE_HT_MCS_NSS_POS) + 1; + } else if (rate & RATE_MCS_HE_MSK) { + type = "HE"; + mcs = rate & RATE_VHT_MCS_RATE_CODE_MSK; + nss = ((rate & RATE_VHT_MCS_NSS_MSK) + >> RATE_VHT_MCS_NSS_POS) + 1; } else { type = "Unknown"; /* shouldn't happen */ } @@ -3886,6 +3913,8 @@ static ssize_t 
rs_sta_dbgfs_drv_tx_stats_read(struct file *file, [IWL_RATE_MCS_7_INDEX] = "MCS7", [IWL_RATE_MCS_8_INDEX] = "MCS8", [IWL_RATE_MCS_9_INDEX] = "MCS9", + [IWL_RATE_MCS_10_INDEX] = "MCS10", + [IWL_RATE_MCS_11_INDEX] = "MCS11", }; char *buff, *pos, *endpos; diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h index cffb8c852934..d2cf484e2b73 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rs.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rs.h @@ -144,8 +144,13 @@ enum { #define LINK_QUAL_AGG_FRAME_LIMIT_DEF (63) #define LINK_QUAL_AGG_FRAME_LIMIT_MAX (63) -#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF (64) -#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_MAX (64) +/* + * FIXME - various places in firmware API still use u8, + * e.g. LQ command and SCD config command. + * This should be 256 instead. + */ +#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_DEF (255) +#define LINK_QUAL_AGG_FRAME_LIMIT_GEN2_MAX (255) #define LINK_QUAL_AGG_FRAME_LIMIT_MIN (0) #define LQ_SIZE 2 /* 2 mode tables: "Active" and "Search" */ @@ -162,6 +167,8 @@ enum iwl_table_type { LQ_HT_MIMO2, LQ_VHT_SISO, /* VHT types */ LQ_VHT_MIMO2, + LQ_HE_SISO, /* HE types */ + LQ_HE_MIMO2, LQ_MAX, }; @@ -183,11 +190,16 @@ struct rs_rate { #define is_type_ht_mimo2(type) ((type) == LQ_HT_MIMO2) #define is_type_vht_siso(type) ((type) == LQ_VHT_SISO) #define is_type_vht_mimo2(type) ((type) == LQ_VHT_MIMO2) -#define is_type_siso(type) (is_type_ht_siso(type) || is_type_vht_siso(type)) -#define is_type_mimo2(type) (is_type_ht_mimo2(type) || is_type_vht_mimo2(type)) +#define is_type_he_siso(type) ((type) == LQ_HE_SISO) +#define is_type_he_mimo2(type) ((type) == LQ_HE_MIMO2) +#define is_type_siso(type) (is_type_ht_siso(type) || is_type_vht_siso(type) || \ + is_type_he_siso(type)) +#define is_type_mimo2(type) (is_type_ht_mimo2(type) || \ + is_type_vht_mimo2(type) || is_type_he_mimo2(type)) #define is_type_mimo(type) (is_type_mimo2(type)) #define is_type_ht(type) (is_type_ht_siso(type) || is_type_ht_mimo2(type)) #define is_type_vht(type) (is_type_vht_siso(type) || is_type_vht_mimo2(type)) +#define is_type_he(type) (is_type_he_siso(type) || is_type_he_mimo2(type)) #define is_type_a_band(type) ((type) == LQ_LEGACY_A) #define is_type_g_band(type) ((type) == LQ_LEGACY_G) @@ -201,6 +213,7 @@ struct rs_rate { #define is_mimo(rate) is_type_mimo((rate)->type) #define is_ht(rate) is_type_ht((rate)->type) #define is_vht(rate) is_type_vht((rate)->type) +#define is_he(rate) is_type_he((rate)->type) #define is_a_band(rate) is_type_a_band((rate)->type) #define is_g_band(rate) is_type_g_band((rate)->type) diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c index 129c4c09648d..b53148f972a4 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/rxmq.c @@ -8,6 +8,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -30,6 +31,7 @@ * Copyright(c) 2012 - 2014 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2015 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -196,22 +198,31 @@ static void iwl_mvm_pass_packet_to_mac80211(struct iwl_mvm *mvm, struct sk_buff *skb, int queue, struct ieee80211_sta *sta) { - if (iwl_mvm_check_pn(mvm, skb, queue, sta)) + struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb); + + if (iwl_mvm_check_pn(mvm, skb, queue, sta)) { kfree_skb(skb); - else + } else { + unsigned int radiotap_len = 0; + + if (rx_status->flag & RX_FLAG_RADIOTAP_HE) + radiotap_len += sizeof(struct ieee80211_radiotap_he); + if (rx_status->flag & RX_FLAG_RADIOTAP_HE_MU) + radiotap_len += sizeof(struct ieee80211_radiotap_he_mu); + __skb_push(skb, radiotap_len); ieee80211_rx_napi(mvm->hw, sta, skb, napi); + } } static void iwl_mvm_get_signal_strength(struct iwl_mvm *mvm, - struct iwl_rx_mpdu_desc *desc, - struct ieee80211_rx_status *rx_status) + struct ieee80211_rx_status *rx_status, + u32 rate_n_flags, int energy_a, + int energy_b) { - int energy_a, energy_b, max_energy; - u32 rate_flags = le32_to_cpu(desc->rate_n_flags); + int max_energy; + u32 rate_flags = rate_n_flags; - energy_a = desc->energy_a; energy_a = energy_a ? -energy_a : S8_MIN; - energy_b = desc->energy_b; energy_b = energy_b ? -energy_b : S8_MIN; max_energy = max(energy_a, energy_b); @@ -356,7 +367,8 @@ static bool iwl_mvm_is_dup(struct ieee80211_sta *sta, int queue, tid = IWL_MAX_TID_COUNT; /* If this wasn't a part of an A-MSDU the sub-frame index will be 0 */ - sub_frame_idx = desc->amsdu_info & IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK; + sub_frame_idx = desc->amsdu_info & + IWL_RX_MPDU_AMSDU_SUBFRAME_IDX_MASK; if (unlikely(ieee80211_has_retry(hdr->frame_control) && dup_data->last_seq[tid] == hdr->seq_ctrl && @@ -850,17 +862,41 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, struct ieee80211_rx_status *rx_status; struct iwl_rx_packet *pkt = rxb_addr(rxb); struct iwl_rx_mpdu_desc *desc = (void *)pkt->data; - struct ieee80211_hdr *hdr = (void *)(pkt->data + sizeof(*desc)); + struct ieee80211_hdr *hdr; u32 len = le16_to_cpu(desc->mpdu_len); - u32 rate_n_flags = le32_to_cpu(desc->rate_n_flags); + u32 rate_n_flags, gp2_on_air_rise; u16 phy_info = le16_to_cpu(desc->phy_info); struct ieee80211_sta *sta = NULL; struct sk_buff *skb; - u8 crypt_len = 0; + u8 crypt_len = 0, channel, energy_a, energy_b; + struct ieee80211_radiotap_he *he = NULL; + struct ieee80211_radiotap_he_mu *he_mu = NULL; + u32 he_type = 0xffffffff; + /* this is invalid e.g. because puncture type doesn't allow 0b11 */ +#define HE_PHY_DATA_INVAL ((u64)-1) + u64 he_phy_data = HE_PHY_DATA_INVAL; + size_t desc_size; if (unlikely(test_bit(IWL_MVM_STATUS_IN_HW_RESTART, &mvm->status))) return; + if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) { + rate_n_flags = le32_to_cpu(desc->v3.rate_n_flags); + channel = desc->v3.channel; + gp2_on_air_rise = le32_to_cpu(desc->v3.gp2_on_air_rise); + energy_a = desc->v3.energy_a; + energy_b = desc->v3.energy_b; + desc_size = sizeof(*desc); + } else { + rate_n_flags = le32_to_cpu(desc->v1.rate_n_flags); + channel = desc->v1.channel; + gp2_on_air_rise = le32_to_cpu(desc->v1.gp2_on_air_rise); + energy_a = desc->v1.energy_a; + energy_b = desc->v1.energy_b; + desc_size = IWL_RX_DESC_SIZE_V1; + } + + hdr = (void *)(pkt->data + desc_size); /* Dont use dev_alloc_skb(), we'll have enough headroom once * ieee80211_hdr pulled. 
*/ @@ -882,6 +918,51 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, rx_status = IEEE80211_SKB_RXCB(skb); + if (rate_n_flags & RATE_MCS_HE_MSK) { + static const struct ieee80211_radiotap_he known = { + .data1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_DATA_MCS_KNOWN | + IEEE80211_RADIOTAP_HE_DATA1_DATA_DCM_KNOWN | + IEEE80211_RADIOTAP_HE_DATA1_STBC_KNOWN | + IEEE80211_RADIOTAP_HE_DATA1_CODING_KNOWN), + .data2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_GI_KNOWN | + IEEE80211_RADIOTAP_HE_DATA2_TXBF_KNOWN), + }; + static const struct ieee80211_radiotap_he_mu mu_known = { + .flags1 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS_KNOWN | + IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM_KNOWN | + IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_SYMS_USERS_KNOWN | + IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_COMP_KNOWN), + .flags2 = cpu_to_le16(IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW_KNOWN), + }; + unsigned int radiotap_len = 0; + + he = skb_put_data(skb, &known, sizeof(known)); + radiotap_len += sizeof(known); + rx_status->flag |= RX_FLAG_RADIOTAP_HE; + + he_type = rate_n_flags & RATE_MCS_HE_TYPE_MSK; + + if (phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD) { + if (mvm->trans->cfg->device_family >= + IWL_DEVICE_FAMILY_22560) + he_phy_data = le64_to_cpu(desc->v3.he_phy_data); + else + he_phy_data = le64_to_cpu(desc->v1.he_phy_data); + + if (he_type == RATE_MCS_HE_TYPE_MU) { + he_mu = skb_put_data(skb, &mu_known, + sizeof(mu_known)); + radiotap_len += sizeof(mu_known); + rx_status->flag |= RX_FLAG_RADIOTAP_HE_MU; + } + } + + /* temporarily hide the radiotap data */ + __skb_pull(skb, radiotap_len); + } + + rx_status = IEEE80211_SKB_RXCB(skb); + if (iwl_mvm_rx_crypto(mvm, hdr, rx_status, phy_info, desc, le32_to_cpu(pkt->len_n_flags), queue, &crypt_len)) { @@ -904,20 +985,80 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, rx_status->enc_flags |= RX_ENC_FLAG_SHORTPRE; if (likely(!(phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD))) { - rx_status->mactime = le64_to_cpu(desc->tsf_on_air_rise); + u64 tsf_on_air_rise; + + if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) + tsf_on_air_rise = le64_to_cpu(desc->v3.tsf_on_air_rise); + else + tsf_on_air_rise = le64_to_cpu(desc->v1.tsf_on_air_rise); + + rx_status->mactime = tsf_on_air_rise; /* TSF as indicated by the firmware is at INA time */ rx_status->flag |= RX_FLAG_MACTIME_PLCP_START; + } else if (he_type == RATE_MCS_HE_TYPE_SU) { + u64 he_phy_data; + + if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) + he_phy_data = le64_to_cpu(desc->v3.he_phy_data); + else + he_phy_data = le64_to_cpu(desc->v1.he_phy_data); + + he->data1 |= + cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_UL_DL_KNOWN); + if (FIELD_GET(IWL_RX_HE_PHY_UPLINK, + he_phy_data)) + he->data3 |= + cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA3_UL_DL); + + if (!queue && !(phy_info & IWL_RX_MPDU_PHY_AMPDU)) { + rx_status->ampdu_reference = mvm->ampdu_ref; + mvm->ampdu_ref++; + + rx_status->flag |= RX_FLAG_AMPDU_DETAILS; + rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN; + if (FIELD_GET(IWL_RX_HE_PHY_DELIM_EOF, + he_phy_data)) + rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT; + } + } else if (he_mu && he_phy_data != HE_PHY_DATA_INVAL) { + he_mu->flags1 |= + le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SIBG_SYM_OR_USER_NUM_MASK, + he_phy_data), + IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_SYMS_USERS); + he_mu->flags1 |= + le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SIGB_DCM, + he_phy_data), + IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_DCM); + he_mu->flags1 |= + 
le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SIGB_MCS_MASK, + he_phy_data), + IEEE80211_RADIOTAP_HE_MU_FLAGS1_SIG_B_MCS); + he_mu->flags2 |= + le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_SIGB_COMPRESSION, + he_phy_data), + IEEE80211_RADIOTAP_HE_MU_FLAGS2_SIG_B_COMP); + he_mu->flags2 |= + le16_encode_bits(FIELD_GET(IWL_RX_HE_PHY_PREAMBLE_PUNC_TYPE_MASK, + he_phy_data), + IEEE80211_RADIOTAP_HE_MU_FLAGS2_PUNC_FROM_SIG_A_BW); } - rx_status->device_timestamp = le32_to_cpu(desc->gp2_on_air_rise); - rx_status->band = desc->channel > 14 ? NL80211_BAND_5GHZ : - NL80211_BAND_2GHZ; - rx_status->freq = ieee80211_channel_to_frequency(desc->channel, + rx_status->device_timestamp = gp2_on_air_rise; + rx_status->band = channel > 14 ? NL80211_BAND_5GHZ : + NL80211_BAND_2GHZ; + rx_status->freq = ieee80211_channel_to_frequency(channel, rx_status->band); - iwl_mvm_get_signal_strength(mvm, desc, rx_status); + iwl_mvm_get_signal_strength(mvm, rx_status, rate_n_flags, energy_a, + energy_b); /* update aggregation data for monitor sake on default queue */ if (!queue && (phy_info & IWL_RX_MPDU_PHY_AMPDU)) { bool toggle_bit = phy_info & IWL_RX_MPDU_PHY_AMPDU_TOGGLE; + u64 he_phy_data; + + if (mvm->trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) + he_phy_data = le64_to_cpu(desc->v3.he_phy_data); + else + he_phy_data = le64_to_cpu(desc->v1.he_phy_data); rx_status->flag |= RX_FLAG_AMPDU_DETAILS; rx_status->ampdu_reference = mvm->ampdu_ref; @@ -925,6 +1066,15 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, if (toggle_bit != mvm->ampdu_toggle) { mvm->ampdu_ref++; mvm->ampdu_toggle = toggle_bit; + + if (he_phy_data != HE_PHY_DATA_INVAL && + he_type == RATE_MCS_HE_TYPE_MU) { + rx_status->flag |= RX_FLAG_AMPDU_EOF_BIT_KNOWN; + if (FIELD_GET(IWL_RX_HE_PHY_DELIM_EOF, + he_phy_data)) + rx_status->flag |= + RX_FLAG_AMPDU_EOF_BIT; + } } } @@ -1033,7 +1183,6 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, } } - /* Set up the HT phy flags */ switch (rate_n_flags & RATE_MCS_CHAN_WIDTH_MSK) { case RATE_MCS_CHAN_WIDTH_20: break; @@ -1048,6 +1197,70 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, break; } + if (he_type == RATE_MCS_HE_TYPE_EXT_SU && + rate_n_flags & RATE_MCS_HE_106T_MSK) { + rx_status->bw = RATE_INFO_BW_HE_RU; + rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106; + } + + if (rate_n_flags & RATE_MCS_HE_MSK && + phy_info & IWL_RX_MPDU_PHY_TSF_OVERLOAD && + he_type == RATE_MCS_HE_TYPE_MU) { + /* + * Unfortunately, we have to leave the mac80211 data + * incorrect for the case that we receive an HE-MU + * transmission and *don't* have the he_mu pointer, + * i.e. we don't have the phy data (due to the bits + * being used for TSF). This shouldn't happen, though, + * since management frames, for which we need the + * TSF/timers, should not be transmitted in HE-MU. + */ + u8 ru = FIELD_GET(IWL_RX_HE_PHY_RU_ALLOC_MASK, he_phy_data); + u8 offs = 0; + + rx_status->bw = RATE_INFO_BW_HE_RU; + + switch (ru) { + case 0 ... 36: + rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_26; + offs = ru; + break; + case 37 ... 52: + rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_52; + offs = ru - 37; + break; + case 53 ... 60: + rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_106; + offs = ru - 53; + break; + case 61 ... 64: + rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_242; + offs = ru - 61; + break; + case 65 ...
66: + rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_484; + offs = ru - 65; + break; + case 67: + rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_996; + break; + case 68: + rx_status->he_ru = NL80211_RATE_INFO_HE_RU_ALLOC_2x996; + break; + } + he->data2 |= + le16_encode_bits(offs, + IEEE80211_RADIOTAP_HE_DATA2_RU_OFFSET); + he->data2 |= + cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_KNOWN); + if (he_phy_data & IWL_RX_HE_PHY_RU_ALLOC_SEC80) + he->data2 |= + cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_PRISEC_80_SEC); + } else if (he) { + he->data1 |= + cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA1_BW_RU_ALLOC_KNOWN); + } + if (!(rate_n_flags & RATE_MCS_CCK_MSK) && rate_n_flags & RATE_MCS_SGI_MSK) rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI; @@ -1072,6 +1285,119 @@ void iwl_mvm_rx_mpdu_mq(struct iwl_mvm *mvm, struct napi_struct *napi, rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT; if (rate_n_flags & RATE_MCS_BF_MSK) rx_status->enc_flags |= RX_ENC_FLAG_BF; + } else if (he) { + u8 stbc = (rate_n_flags & RATE_MCS_STBC_MSK) >> + RATE_MCS_STBC_POS; + rx_status->nss = + ((rate_n_flags & RATE_VHT_MCS_NSS_MSK) >> + RATE_VHT_MCS_NSS_POS) + 1; + rx_status->rate_idx = rate_n_flags & RATE_VHT_MCS_RATE_CODE_MSK; + rx_status->encoding = RX_ENC_HE; + rx_status->enc_flags |= stbc << RX_ENC_FLAG_STBC_SHIFT; + if (rate_n_flags & RATE_MCS_BF_MSK) + rx_status->enc_flags |= RX_ENC_FLAG_BF; + + rx_status->he_dcm = + !!(rate_n_flags & RATE_HE_DUAL_CARRIER_MODE_MSK); + +#define CHECK_TYPE(F) \ + BUILD_BUG_ON(IEEE80211_RADIOTAP_HE_DATA1_FORMAT_ ## F != \ + (RATE_MCS_HE_TYPE_ ## F >> RATE_MCS_HE_TYPE_POS)) + + CHECK_TYPE(SU); + CHECK_TYPE(EXT_SU); + CHECK_TYPE(MU); + CHECK_TYPE(TRIG); + + he->data1 |= cpu_to_le16(he_type >> RATE_MCS_HE_TYPE_POS); + + if (rate_n_flags & RATE_MCS_BF_POS) + he->data5 |= cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA5_TXBF); + + switch ((rate_n_flags & RATE_MCS_HE_GI_LTF_MSK) >> + RATE_MCS_HE_GI_LTF_POS) { + case 0: + rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8; + break; + case 1: + rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8; + break; + case 2: + rx_status->he_gi = NL80211_RATE_INFO_HE_GI_1_6; + break; + case 3: + if (rate_n_flags & RATE_MCS_SGI_MSK) + rx_status->he_gi = NL80211_RATE_INFO_HE_GI_0_8; + else + rx_status->he_gi = NL80211_RATE_INFO_HE_GI_3_2; + break; + } + + switch (he_type) { + case RATE_MCS_HE_TYPE_SU: { + u16 val; + + /* LTF syms correspond to streams */ + he->data2 |= + cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN); + switch (rx_status->nss) { + case 1: + val = 0; + break; + case 2: + val = 1; + break; + case 3: + case 4: + val = 2; + break; + case 5: + case 6: + val = 3; + break; + case 7: + case 8: + val = 4; + break; + default: + WARN_ONCE(1, "invalid nss: %d\n", + rx_status->nss); + val = 0; + } + he->data5 |= + le16_encode_bits(val, + IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS); + } + break; + case RATE_MCS_HE_TYPE_MU: { + u16 val; + u64 he_phy_data; + + if (mvm->trans->cfg->device_family >= + IWL_DEVICE_FAMILY_22560) + he_phy_data = le64_to_cpu(desc->v3.he_phy_data); + else + he_phy_data = le64_to_cpu(desc->v1.he_phy_data); + + if (he_phy_data == HE_PHY_DATA_INVAL) + break; + + val = FIELD_GET(IWL_RX_HE_PHY_HE_LTF_NUM_MASK, + he_phy_data); + + he->data2 |= + cpu_to_le16(IEEE80211_RADIOTAP_HE_DATA2_NUM_LTF_SYMS_KNOWN); + he->data5 |= + cpu_to_le16(FIELD_PREP( + IEEE80211_RADIOTAP_HE_DATA5_NUM_LTF_SYMS, + val)); + } + break; + case RATE_MCS_HE_TYPE_EXT_SU: + case RATE_MCS_HE_TYPE_TRIG: + /* not supported yet */ + break; + } } else { int rate = 
iwl_mvm_legacy_rate_to_mac80211_idx(rate_n_flags, rx_status->band); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c index 9263b9aa8b72..18db1ed92d9b 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.c @@ -2184,7 +2184,7 @@ static void iwl_mvm_free_reorder(struct iwl_mvm *mvm, static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm, struct iwl_mvm_baid_data *data, - u16 ssn, u8 buf_size) + u16 ssn, u16 buf_size) { int i; @@ -2211,7 +2211,7 @@ static void iwl_mvm_init_reorder_buffer(struct iwl_mvm *mvm, } int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - int tid, u16 ssn, bool start, u8 buf_size, u16 timeout) + int tid, u16 ssn, bool start, u16 buf_size, u16 timeout) { struct iwl_mvm_sta *mvm_sta = iwl_mvm_sta_from_mac80211(sta); struct iwl_mvm_add_sta_cmd cmd = {}; @@ -2273,7 +2273,7 @@ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, if (start) { cmd.add_immediate_ba_tid = (u8) tid; cmd.add_immediate_ba_ssn = cpu_to_le16(ssn); - cmd.rx_ba_window = cpu_to_le16((u16)buf_size); + cmd.rx_ba_window = cpu_to_le16(buf_size); } else { cmd.remove_immediate_ba_tid = (u8) tid; } @@ -2559,7 +2559,7 @@ out: } int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, u16 tid, u8 buf_size, + struct ieee80211_sta *sta, u16 tid, u16 buf_size, bool amsdu) { struct iwl_mvm_sta *mvmsta = iwl_mvm_sta_from_mac80211(sta); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h index 1c43ea8dd8cc..0fc211108149 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/sta.h +++ b/drivers/net/wireless/intel/iwlwifi/mvm/sta.h @@ -412,7 +412,7 @@ struct iwl_mvm_sta { u32 tfd_queue_msk; u32 mac_id_n_color; u16 tid_disable_agg; - u8 max_agg_bufsize; + u16 max_agg_bufsize; enum iwl_sta_type sta_type; enum ieee80211_sta_state sta_state; bool bt_reduced_txpower; @@ -518,11 +518,11 @@ void iwl_mvm_rx_eosp_notif(struct iwl_mvm *mvm, /* AMPDU */ int iwl_mvm_sta_rx_agg(struct iwl_mvm *mvm, struct ieee80211_sta *sta, - int tid, u16 ssn, bool start, u8 buf_size, u16 timeout); + int tid, u16 ssn, bool start, u16 buf_size, u16 timeout); int iwl_mvm_sta_tx_agg_start(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u16 tid, u16 *ssn); int iwl_mvm_sta_tx_agg_oper(struct iwl_mvm *mvm, struct ieee80211_vif *vif, - struct ieee80211_sta *sta, u16 tid, u8 buf_size, + struct ieee80211_sta *sta, u16 tid, u16 buf_size, bool amsdu); int iwl_mvm_sta_tx_agg_stop(struct iwl_mvm *mvm, struct ieee80211_vif *vif, struct ieee80211_sta *sta, u16 tid); diff --git a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c index cf2591f2ac23..ff193dca2020 100644 --- a/drivers/net/wireless/intel/iwlwifi/mvm/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/mvm/tx.c @@ -484,13 +484,15 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, /* Make sure we zero enough of dev_cmd */ BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) > sizeof(*tx_cmd)); + BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen3) > sizeof(*tx_cmd)); memset(dev_cmd, 0, sizeof(dev_cmd->hdr) + sizeof(*tx_cmd)); dev_cmd->hdr.cmd = TX_CMD; if (iwl_mvm_has_new_tx_api(mvm)) { - struct iwl_tx_cmd_gen2 *cmd = (void *)dev_cmd->payload; u16 offload_assist = 0; + u32 rate_n_flags = 0; + u16 flags = 0; if (ieee80211_is_data_qos(hdr->frame_control)) { u8 *qc = ieee80211_get_qos_ctl(hdr); @@ 
-507,25 +509,43 @@ iwl_mvm_set_tx_params(struct iwl_mvm *mvm, struct sk_buff *skb, !(offload_assist & BIT(TX_CMD_OFFLD_AMSDU))) offload_assist |= BIT(TX_CMD_OFFLD_PAD); - cmd->offload_assist |= cpu_to_le16(offload_assist); + if (!info->control.hw_key) + flags |= IWL_TX_FLAGS_ENCRYPT_DIS; - /* Total # bytes to be transmitted */ - cmd->len = cpu_to_le16((u16)skb->len); + /* For data packets rate info comes from the fw */ + if (!(ieee80211_is_data(hdr->frame_control) && sta)) { + flags |= IWL_TX_FLAGS_CMD_RATE; + rate_n_flags = iwl_mvm_get_tx_rate(mvm, info, sta); + } - /* Copy MAC header from skb into command buffer */ - memcpy(cmd->hdr, hdr, hdrlen); + if (mvm->trans->cfg->device_family >= + IWL_DEVICE_FAMILY_22560) { + struct iwl_tx_cmd_gen3 *cmd = (void *)dev_cmd->payload; - if (!info->control.hw_key) - cmd->flags |= cpu_to_le32(IWL_TX_FLAGS_ENCRYPT_DIS); + cmd->offload_assist |= cpu_to_le32(offload_assist); - /* For data packets rate info comes from the fw */ - if (ieee80211_is_data(hdr->frame_control) && sta) - goto out; + /* Total # bytes to be transmitted */ + cmd->len = cpu_to_le16((u16)skb->len); - cmd->flags |= cpu_to_le32(IWL_TX_FLAGS_CMD_RATE); - cmd->rate_n_flags = - cpu_to_le32(iwl_mvm_get_tx_rate(mvm, info, sta)); + /* Copy MAC header from skb into command buffer */ + memcpy(cmd->hdr, hdr, hdrlen); + cmd->flags = cpu_to_le16(flags); + cmd->rate_n_flags = cpu_to_le32(rate_n_flags); + } else { + struct iwl_tx_cmd_gen2 *cmd = (void *)dev_cmd->payload; + + cmd->offload_assist |= cpu_to_le16(offload_assist); + + /* Total # bytes to be transmitted */ + cmd->len = cpu_to_le16((u16)skb->len); + + /* Copy MAC header from skb into command buffer */ + memcpy(cmd->hdr, hdr, hdrlen); + + cmd->flags = cpu_to_le32(flags); + cmd->rate_n_flags = cpu_to_le32(rate_n_flags); + } goto out; } diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c new file mode 100644 index 000000000000..2146fda8da2f --- /dev/null +++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info-gen3.c @@ -0,0 +1,207 @@ +/****************************************************************************** + * + * This file is provided under a dual BSD/GPLv2 license. When using or + * redistributing this file, you may do so under either license. + * + * GPL LICENSE SUMMARY + * + * Copyright(c) 2018 Intel Corporation + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of version 2 of the GNU General Public License as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, but + * WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU + * General Public License for more details. + * + * BSD LICENSE + * + * Copyright(c) 2018 Intel Corporation + * All rights reserved. + * + * Redistribution and use in source and binary forms, with or without + * modification, are permitted provided that the following conditions + * are met: + * + * * Redistributions of source code must retain the above copyright + * notice, this list of conditions and the following disclaimer. + * * Redistributions in binary form must reproduce the above copyright + * notice, this list of conditions and the following disclaimer in + * the documentation and/or other materials provided with the + * distribution. 
+ * * Neither the name Intel Corporation nor the names of its + * contributors may be used to endorse or promote products derived + * from this software without specific prior written permission. + * + * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS + * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT + * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR + * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT + * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, + * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT + * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, + * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY + * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT + * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE + * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + * + *****************************************************************************/ + +#include "iwl-trans.h" +#include "iwl-fh.h" +#include "iwl-context-info-gen3.h" +#include "internal.h" +#include "iwl-prph.h" + +int iwl_pcie_ctxt_info_gen3_init(struct iwl_trans *trans, + const struct fw_img *fw) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_context_info_gen3 *ctxt_info_gen3; + struct iwl_prph_scratch *prph_scratch; + struct iwl_prph_scratch_ctrl_cfg *prph_sc_ctrl; + struct iwl_prph_info *prph_info; + void *iml_img; + u32 control_flags = 0; + int ret; + + /* Allocate prph scratch */ + prph_scratch = dma_alloc_coherent(trans->dev, sizeof(*prph_scratch), + &trans_pcie->prph_scratch_dma_addr, + GFP_KERNEL); + if (!prph_scratch) + return -ENOMEM; + + prph_sc_ctrl = &prph_scratch->ctrl_cfg; + + prph_sc_ctrl->version.version = 0; + prph_sc_ctrl->version.mac_id = + cpu_to_le16((u16)iwl_read32(trans, CSR_HW_REV)); + prph_sc_ctrl->version.size = cpu_to_le16(sizeof(*prph_scratch) / 4); + + control_flags = IWL_PRPH_SCRATCH_RB_SIZE_4K | + IWL_PRPH_SCRATCH_MTR_MODE | + (IWL_PRPH_MTR_FORMAT_256B & + IWL_PRPH_SCRATCH_MTR_FORMAT) | + IWL_PRPH_SCRATCH_EARLY_DEBUG_EN | + IWL_PRPH_SCRATCH_EDBG_DEST_DRAM; + prph_sc_ctrl->control.control_flags = cpu_to_le32(control_flags); + + /* initialize RX default queue */ + prph_sc_ctrl->rbd_cfg.free_rbd_addr = + cpu_to_le64(trans_pcie->rxq->bd_dma); + + /* Configure debug, for integration */ + iwl_pcie_alloc_fw_monitor(trans, 0); + prph_sc_ctrl->hwm_cfg.hwm_base_addr = + cpu_to_le64(trans_pcie->fw_mon_phys); + prph_sc_ctrl->hwm_cfg.hwm_size = + cpu_to_le32(trans_pcie->fw_mon_size); + + /* allocate ucode sections in dram and set addresses */ + ret = iwl_pcie_init_fw_sec(trans, fw, &prph_scratch->dram); + if (ret) { + dma_free_coherent(trans->dev, + sizeof(*prph_scratch), + prph_scratch, + trans_pcie->prph_scratch_dma_addr); + return ret; + } + + /* Allocate prph information + * currently we don't assign to the prph info anything, but it would get + * assigned later */ + prph_info = dma_alloc_coherent(trans->dev, sizeof(*prph_info), + &trans_pcie->prph_info_dma_addr, + GFP_KERNEL); + if (!prph_info) + return -ENOMEM; + + /* Allocate context info */ + ctxt_info_gen3 = dma_alloc_coherent(trans->dev, + sizeof(*ctxt_info_gen3), + &trans_pcie->ctxt_info_dma_addr, + GFP_KERNEL); + if (!ctxt_info_gen3) + return -ENOMEM; + + ctxt_info_gen3->prph_info_base_addr = + cpu_to_le64(trans_pcie->prph_info_dma_addr); + ctxt_info_gen3->prph_scratch_base_addr = + 
cpu_to_le64(trans_pcie->prph_scratch_dma_addr); + ctxt_info_gen3->prph_scratch_size = + cpu_to_le32(sizeof(*prph_scratch)); + ctxt_info_gen3->cr_head_idx_arr_base_addr = + cpu_to_le64(trans_pcie->rxq->rb_stts_dma); + ctxt_info_gen3->tr_tail_idx_arr_base_addr = + cpu_to_le64(trans_pcie->rxq->tr_tail_dma); + ctxt_info_gen3->cr_tail_idx_arr_base_addr = + cpu_to_le64(trans_pcie->rxq->cr_tail_dma); + ctxt_info_gen3->cr_idx_arr_size = + cpu_to_le16(IWL_NUM_OF_COMPLETION_RINGS); + ctxt_info_gen3->tr_idx_arr_size = + cpu_to_le16(IWL_NUM_OF_TRANSFER_RINGS); + ctxt_info_gen3->mtr_base_addr = + cpu_to_le64(trans_pcie->txq[trans_pcie->cmd_queue]->dma_addr); + ctxt_info_gen3->mcr_base_addr = + cpu_to_le64(trans_pcie->rxq->used_bd_dma); + ctxt_info_gen3->mtr_size = + cpu_to_le16(TFD_QUEUE_CB_SIZE(TFD_CMD_SLOTS)); + ctxt_info_gen3->mcr_size = + cpu_to_le16(RX_QUEUE_CB_SIZE(MQ_RX_TABLE_SIZE)); + + trans_pcie->ctxt_info_gen3 = ctxt_info_gen3; + trans_pcie->prph_info = prph_info; + trans_pcie->prph_scratch = prph_scratch; + + /* Allocate IML */ + iml_img = dma_alloc_coherent(trans->dev, trans->iml_len, + &trans_pcie->iml_dma_addr, GFP_KERNEL); + if (!iml_img) + return -ENOMEM; + + memcpy(iml_img, trans->iml, trans->iml_len); + + iwl_enable_interrupts(trans); + + /* kick FW self load */ + iwl_write64(trans, CSR_CTXT_INFO_ADDR, + trans_pcie->ctxt_info_dma_addr); + iwl_write64(trans, CSR_IML_DATA_ADDR, + trans_pcie->iml_dma_addr); + iwl_write32(trans, CSR_IML_SIZE_ADDR, trans->iml_len); + iwl_set_bit(trans, CSR_CTXT_INFO_BOOT_CTRL, CSR_AUTO_FUNC_BOOT_ENA); + iwl_set_bit(trans, CSR_GP_CNTRL, CSR_AUTO_FUNC_INIT); + + return 0; +} + +void iwl_pcie_ctxt_info_gen3_free(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + if (!trans_pcie->ctxt_info_gen3) + return; + + dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info_gen3), + trans_pcie->ctxt_info_gen3, + trans_pcie->ctxt_info_dma_addr); + trans_pcie->ctxt_info_dma_addr = 0; + trans_pcie->ctxt_info_gen3 = NULL; + + iwl_pcie_ctxt_info_free_fw_img(trans); + + dma_free_coherent(trans->dev, sizeof(*trans_pcie->prph_scratch), + trans_pcie->prph_scratch, + trans_pcie->prph_scratch_dma_addr); + trans_pcie->prph_scratch_dma_addr = 0; + trans_pcie->prph_scratch = NULL; + + dma_free_coherent(trans->dev, sizeof(*trans_pcie->prph_info), + trans_pcie->prph_info, + trans_pcie->prph_info_dma_addr); + trans_pcie->prph_info_dma_addr = 0; + trans_pcie->prph_info = NULL; +} diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c index 3fc4343581ee..b2cd7ef5fc3a 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/ctxt-info.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -19,6 +20,7 @@ * BSD LICENSE * * Copyright(c) 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. 
* * Redistribution and use in source and binary forms, with or without @@ -55,57 +57,6 @@ #include "internal.h" #include "iwl-prph.h" -static int iwl_pcie_get_num_sections(const struct fw_img *fw, - int start) -{ - int i = 0; - - while (start < fw->num_sec && - fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION && - fw->sec[start].offset != PAGING_SEPARATOR_SECTION) { - start++; - i++; - } - - return i; -} - -static int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans, - const struct fw_desc *sec, - struct iwl_dram_data *dram) -{ - dram->block = dma_alloc_coherent(trans->dev, sec->len, - &dram->physical, - GFP_KERNEL); - if (!dram->block) - return -ENOMEM; - - dram->size = sec->len; - memcpy(dram->block, sec->data, sec->len); - - return 0; -} - -static void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans) -{ - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_self_init_dram *dram = &trans_pcie->init_dram; - int i; - - if (!dram->fw) { - WARN_ON(dram->fw_cnt); - return; - } - - for (i = 0; i < dram->fw_cnt; i++) - dma_free_coherent(trans->dev, dram->fw[i].size, - dram->fw[i].block, dram->fw[i].physical); - - kfree(dram->fw); - dram->fw_cnt = 0; - dram->fw = NULL; -} - void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -128,13 +79,12 @@ void iwl_pcie_ctxt_info_free_paging(struct iwl_trans *trans) dram->paging = NULL; } -static int iwl_pcie_ctxt_info_init_fw_sec(struct iwl_trans *trans, - const struct fw_img *fw, - struct iwl_context_info *ctxt_info) +int iwl_pcie_init_fw_sec(struct iwl_trans *trans, + const struct fw_img *fw, + struct iwl_context_info_dram *ctxt_dram) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_self_init_dram *dram = &trans_pcie->init_dram; - struct iwl_context_info_dram *ctxt_dram = &ctxt_info->dram; int i, ret, lmac_cnt, umac_cnt, paging_cnt; if (WARN(dram->paging, @@ -247,7 +197,7 @@ int iwl_pcie_ctxt_info_init(struct iwl_trans *trans, TFD_QUEUE_CB_SIZE(TFD_CMD_SLOTS); /* allocate ucode sections in dram and set addresses */ - ret = iwl_pcie_ctxt_info_init_fw_sec(trans, fw, ctxt_info); + ret = iwl_pcie_init_fw_sec(trans, fw, &ctxt_info->dram); if (ret) { dma_free_coherent(trans->dev, sizeof(*trans_pcie->ctxt_info), ctxt_info, trans_pcie->ctxt_info_dma_addr); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c index 8520523b91b4..562cc79288a6 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/drv.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/drv.c @@ -828,19 +828,32 @@ static const struct pci_device_id iwl_hw_card_ids[] = { {IWL_PCI_DEVICE(0xA370, 0x42A4, iwl9462_2ac_cfg_soc)}, /* 22000 Series */ - {IWL_PCI_DEVICE(0x2720, 0x0A10, iwl22000_2ac_cfg_hr_cdb)}, - {IWL_PCI_DEVICE(0x34F0, 0x0310, iwl22000_2ac_cfg_jf)}, {IWL_PCI_DEVICE(0x2720, 0x0000, iwl22000_2ax_cfg_hr)}, - {IWL_PCI_DEVICE(0x34F0, 0x0070, iwl22000_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0x2720, 0x0040, iwl22000_2ax_cfg_hr)}, {IWL_PCI_DEVICE(0x2720, 0x0078, iwl22000_2ax_cfg_hr)}, {IWL_PCI_DEVICE(0x2720, 0x0070, iwl22000_2ac_cfg_hr_cdb)}, {IWL_PCI_DEVICE(0x2720, 0x0030, iwl22000_2ac_cfg_hr_cdb)}, {IWL_PCI_DEVICE(0x2720, 0x1080, iwl22000_2ax_cfg_hr)}, {IWL_PCI_DEVICE(0x2720, 0x0090, iwl22000_2ac_cfg_hr_cdb)}, {IWL_PCI_DEVICE(0x2720, 0x0310, iwl22000_2ac_cfg_hr_cdb)}, - {IWL_PCI_DEVICE(0x40C0, 0x0000, iwl22000_2ax_cfg_hr)}, - {IWL_PCI_DEVICE(0x40C0, 0x0A10, iwl22000_2ax_cfg_hr)}, + 
{IWL_PCI_DEVICE(0x34F0, 0x0040, iwl22000_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0x34F0, 0x0070, iwl22000_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0x34F0, 0x0078, iwl22000_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0x34F0, 0x0310, iwl22000_2ac_cfg_jf)}, + {IWL_PCI_DEVICE(0x40C0, 0x0000, iwl22560_2ax_cfg_su_cdb)}, + {IWL_PCI_DEVICE(0x40C0, 0x0010, iwl22560_2ax_cfg_su_cdb)}, + {IWL_PCI_DEVICE(0x40c0, 0x0090, iwl22560_2ax_cfg_su_cdb)}, + {IWL_PCI_DEVICE(0x40C0, 0x0310, iwl22560_2ax_cfg_su_cdb)}, + {IWL_PCI_DEVICE(0x40C0, 0x0A10, iwl22560_2ax_cfg_su_cdb)}, + {IWL_PCI_DEVICE(0x43F0, 0x0040, iwl22000_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0x43F0, 0x0070, iwl22000_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0x43F0, 0x0078, iwl22000_2ax_cfg_hr)}, {IWL_PCI_DEVICE(0xA0F0, 0x0000, iwl22000_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0xA0F0, 0x0040, iwl22000_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0xA0F0, 0x0070, iwl22000_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0xA0F0, 0x0078, iwl22000_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0xA0F0, 0x00B0, iwl22000_2ax_cfg_hr)}, + {IWL_PCI_DEVICE(0xA0F0, 0x0A10, iwl22000_2ax_cfg_hr)}, #endif /* CONFIG_IWLMVM */ @@ -1003,6 +1016,10 @@ static int iwl_pci_resume(struct device *device) if (!trans->op_mode) return 0; + /* In WOWLAN, let iwl_trans_pcie_d3_resume do the rest of the work */ + if (test_bit(STATUS_DEVICE_ENABLED, &trans->status)) + return 0; + /* reconfigure the MSI-X mapping to get the correct IRQ for rfkill */ iwl_pcie_conf_msix_hw(trans_pcie); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h index 45ea32796cda..b63d44b7cd7c 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/internal.h +++ b/drivers/net/wireless/intel/iwlwifi/pcie/internal.h @@ -3,6 +3,7 @@ * Copyright(c) 2003 - 2015 Intel Corporation. All rights reserved. * Copyright(c) 2013 - 2015 Intel Mobile Communications GmbH * Copyright(c) 2016 - 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * Portions of this file are derived from the ipw3945 project, as well * as portions of the ieee80211 subsystem header files. @@ -17,8 +18,7 @@ * more details. * * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * this program. * * The full GNU General Public License is included in this distribution in the * file called LICENSE. @@ -45,6 +45,7 @@ #include "iwl-debug.h" #include "iwl-io.h" #include "iwl-op-mode.h" +#include "iwl-drv.h" /* We need 2 entries for the TX command and header, and another one might * be needed for potential data in the SKB's head. 
The remaining ones can @@ -59,6 +60,7 @@ #define RX_POST_REQ_ALLOC 2 #define RX_CLAIM_REQ_ALLOC 8 #define RX_PENDING_WATERMARK 16 +#define FIRST_RX_QUEUE 512 struct iwl_host_cmd; @@ -71,6 +73,7 @@ struct iwl_host_cmd; * @page: driver's pointer to the rxb page * @invalid: rxb is in driver ownership - not owned by HW * @vid: index of this rxb in the global table + * @size: size used from the buffer */ struct iwl_rx_mem_buffer { dma_addr_t page_dma; @@ -78,6 +81,7 @@ struct iwl_rx_mem_buffer { u16 vid; bool invalid; struct list_head list; + u32 size; }; /** @@ -98,14 +102,121 @@ struct isr_statistics { u32 unhandled; }; +#define IWL_CD_STTS_OPTIMIZED_POS 0 +#define IWL_CD_STTS_OPTIMIZED_MSK 0x01 +#define IWL_CD_STTS_TRANSFER_STATUS_POS 1 +#define IWL_CD_STTS_TRANSFER_STATUS_MSK 0x0E +#define IWL_CD_STTS_WIFI_STATUS_POS 4 +#define IWL_CD_STTS_WIFI_STATUS_MSK 0xF0 + +/** + * enum iwl_completion_desc_transfer_status - transfer status (bits 1-3) + * @IWL_CD_STTS_END_TRANSFER: successful transfer complete. + * In sniffer mode, when split is used, set in last CD completion. (RX) + * @IWL_CD_STTS_OVERFLOW: In sniffer mode, when using split - used for + * all CD completion. (RX) + * @IWL_CD_STTS_ABORTED: CR abort / close flow. (RX) + */ +enum iwl_completion_desc_transfer_status { + IWL_CD_STTS_UNUSED, + IWL_CD_STTS_UNUSED_2, + IWL_CD_STTS_END_TRANSFER, + IWL_CD_STTS_OVERFLOW, + IWL_CD_STTS_ABORTED, + IWL_CD_STTS_ERROR, +}; + +/** + * enum iwl_completion_desc_wifi_status - wifi status (bits 4-7) + * @IWL_CD_STTS_VALID: the packet is valid (RX) + * @IWL_CD_STTS_FCS_ERR: frame check sequence error (RX) + * @IWL_CD_STTS_SEC_KEY_ERR: error handling the security key of rx (RX) + * @IWL_CD_STTS_DECRYPTION_ERR: error decrypting the frame (RX) + * @IWL_CD_STTS_DUP: duplicate packet (RX) + * @IWL_CD_STTS_ICV_MIC_ERR: MIC error (RX) + * @IWL_CD_STTS_INTERNAL_SNAP_ERR: problems removing the snap (RX) + * @IWL_CD_STTS_SEC_PORT_FAIL: security port fail (RX) + * @IWL_CD_STTS_BA_OLD_SN: block ack received old SN (RX) + * @IWL_CD_STTS_QOS_NULL: QoS null packet (RX) + * @IWL_CD_STTS_MAC_HDR_ERR: MAC header conversion error (RX) + * @IWL_CD_STTS_MAX_RETRANS: reached max number of retransmissions (TX) + * @IWL_CD_STTS_EX_LIFETIME: exceeded lifetime (TX) + * @IWL_CD_STTS_NOT_USED: completed but not used (RX) + * @IWL_CD_STTS_REPLAY_ERR: pn check failed, replay error (RX) + */ +enum iwl_completion_desc_wifi_status { + IWL_CD_STTS_VALID, + IWL_CD_STTS_FCS_ERR, + IWL_CD_STTS_SEC_KEY_ERR, + IWL_CD_STTS_DECRYPTION_ERR, + IWL_CD_STTS_DUP, + IWL_CD_STTS_ICV_MIC_ERR, + IWL_CD_STTS_INTERNAL_SNAP_ERR, + IWL_CD_STTS_SEC_PORT_FAIL, + IWL_CD_STTS_BA_OLD_SN, + IWL_CD_STTS_QOS_NULL, + IWL_CD_STTS_MAC_HDR_ERR, + IWL_CD_STTS_MAX_RETRANS, + IWL_CD_STTS_EX_LIFETIME, + IWL_CD_STTS_NOT_USED, + IWL_CD_STTS_REPLAY_ERR, +}; + +#define IWL_RX_TD_TYPE_MSK 0xff000000 +#define IWL_RX_TD_SIZE_MSK 0x00ffffff +#define IWL_RX_TD_SIZE_2K BIT(11) +#define IWL_RX_TD_TYPE 0 + +/** + * struct iwl_rx_transfer_desc - transfer descriptor + * @type_n_size: buffer type (bit 0: external buff valid, + * bit 1: optional footer valid, bit 2-7: reserved) + * and buffer size + * @addr: ptr to free buffer start address + * @rbid: unique tag of the buffer + * @reserved: reserved + */ +struct iwl_rx_transfer_desc { + __le32 type_n_size; + __le64 addr; + __le16 rbid; + __le16 reserved; +} __packed; + +#define IWL_RX_CD_SIZE 0xffffff00 + +/** + * struct iwl_rx_completion_desc - completion descriptor + * @type: buffer type (bit 0: external buff valid, + * bit 1: optional 
footer valid, bit 2-7: reserved) + * @status: status of the completion + * @reserved1: reserved + * @rbid: unique tag of the received buffer + * @size: buffer size, masked by IWL_RX_CD_SIZE + * @reserved2: reserved + */ +struct iwl_rx_completion_desc { + u8 type; + u8 status; + __le16 reserved1; + __le16 rbid; + __le32 size; + u8 reserved2[22]; +} __packed; + /** * struct iwl_rxq - Rx queue * @id: queue index * @bd: driver's pointer to buffer of receive buffer descriptors (rbd). * Address size is 32 bit in pre-9000 devices and 64 bit in 9000 devices. + * In 22560 devices it is a pointer to a list of iwl_rx_transfer_desc's * @bd_dma: bus address of buffer of receive buffer descriptors (rbd) * @ubd: driver's pointer to buffer of used receive buffer descriptors (rbd) * @ubd_dma: physical address of buffer of used receive buffer descriptors (rbd) + * @tr_tail: driver's pointer to the transmission ring tail buffer + * @tr_tail_dma: physical address of the buffer for the transmission ring tail + * @cr_tail: driver's pointer to the completion ring tail buffer + * @cr_tail_dma: physical address of the buffer for the completion ring tail * @read: Shared index to newest available Rx buffer * @write: Shared index to oldest written Rx packet * @free_count: Number of pre-allocated buffers in rx_free @@ -125,8 +236,16 @@ struct iwl_rxq { int id; void *bd; dma_addr_t bd_dma; - __le32 *used_bd; + union { + void *used_bd; + __le32 *bd_32; + struct iwl_rx_completion_desc *cd; + }; dma_addr_t used_bd_dma; + __le16 *tr_tail; + dma_addr_t tr_tail_dma; + __le16 *cr_tail; + dma_addr_t cr_tail_dma; u32 read; u32 write; u32 free_count; @@ -136,7 +255,7 @@ struct iwl_rxq { struct list_head rx_free; struct list_head rx_used; bool need_update; - struct iwl_rb_status *rb_stts; + void *rb_stts; dma_addr_t rb_stts_dma; spinlock_t lock; struct napi_struct napi; @@ -175,18 +294,36 @@ struct iwl_dma_ptr { * iwl_queue_inc_wrap - increment queue index, wrap back to beginning * @index -- current index */ -static inline int iwl_queue_inc_wrap(int index) +static inline int iwl_queue_inc_wrap(struct iwl_trans *trans, int index) { - return ++index & (TFD_QUEUE_SIZE_MAX - 1); + return ++index & (trans->cfg->base_params->max_tfd_queue_size - 1); +} + +/** + * iwl_get_closed_rb_stts - get closed rb stts from different structs + * @rxq - the rxq to get the rb stts from + */ +static inline __le16 iwl_get_closed_rb_stts(struct iwl_trans *trans, + struct iwl_rxq *rxq) +{ + if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) { + __le16 *rb_stts = rxq->rb_stts; + + return READ_ONCE(*rb_stts); + } else { + struct iwl_rb_status *rb_stts = rxq->rb_stts; + + return READ_ONCE(rb_stts->closed_rb_num); + } } /** * iwl_queue_dec_wrap - decrement queue index, wrap back to end * @index -- current index */ -static inline int iwl_queue_dec_wrap(int index) +static inline int iwl_queue_dec_wrap(struct iwl_trans *trans, int index) { - return --index & (TFD_QUEUE_SIZE_MAX - 1); + return --index & (trans->cfg->base_params->max_tfd_queue_size - 1); } struct iwl_cmd_meta { @@ -315,6 +452,18 @@ enum iwl_shared_irq_flags { }; /** + * enum iwl_image_response_code - image response values + * @IWL_IMAGE_RESP_DEF: the default value of the register + * @IWL_IMAGE_RESP_SUCCESS: iml was read successfully + * @IWL_IMAGE_RESP_FAIL: iml reading failed + */ +enum iwl_image_response_code { + IWL_IMAGE_RESP_DEF = 0, + IWL_IMAGE_RESP_SUCCESS = 1, + IWL_IMAGE_RESP_FAIL = 2, +}; + +/** * struct iwl_dram_data * @physical: page phy pointer * @block: pointer to the 
allocated block/page @@ -347,6 +496,12 @@ struct iwl_self_init_dram { * @global_table: table mapping received VID from hw to rxb * @rba: allocator for RX replenishing * @ctxt_info: context information for FW self init + * @ctxt_info_gen3: context information for gen3 devices + * @prph_info: prph info for self init + * @prph_scratch: prph scratch for self init + * @ctxt_info_dma_addr: dma addr of context information + * @prph_info_dma_addr: dma addr of prph info + * @prph_scratch_dma_addr: dma addr of prph scratch * @ctxt_info_dma_addr: dma addr of context information * @init_dram: DRAM data of firmware image (including paging). * Context information addresses will be taken from here. @@ -391,8 +546,16 @@ struct iwl_trans_pcie { struct iwl_rx_mem_buffer rx_pool[RX_POOL_SIZE]; struct iwl_rx_mem_buffer *global_table[RX_POOL_SIZE]; struct iwl_rb_allocator rba; - struct iwl_context_info *ctxt_info; + union { + struct iwl_context_info *ctxt_info; + struct iwl_context_info_gen3 *ctxt_info_gen3; + }; + struct iwl_prph_info *prph_info; + struct iwl_prph_scratch *prph_scratch; dma_addr_t ctxt_info_dma_addr; + dma_addr_t prph_info_dma_addr; + dma_addr_t prph_scratch_dma_addr; + dma_addr_t iml_dma_addr; struct iwl_self_init_dram init_dram; struct iwl_trans *trans; @@ -477,6 +640,20 @@ IWL_TRANS_GET_PCIE_TRANS(struct iwl_trans *trans) return (void *)trans->trans_specific; } +static inline void iwl_pcie_clear_irq(struct iwl_trans *trans, + struct msix_entry *entry) +{ + /* + * Before sending the interrupt the HW disables it to prevent + * a nested interrupt. This is done by writing 1 to the corresponding + * bit in the mask register. After handling the interrupt, it should be + * re-enabled by clearing this bit. This register is defined as + * write 1 clear (W1C) register, meaning that it's being clear + * by writing 1 to the bit. 
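
A minimal, standalone sketch of the write-1-to-clear (W1C) behaviour described in the comment above for CSR_MSIX_AUTOMASK_ST_AD: the hardware auto-masks a cause by setting its bit, and the driver re-enables it by writing 1 to that same bit. The register model and helper names below are illustrative only, not part of the patch.

#include <stdint.h>

static uint32_t fake_automask_reg;      /* stands in for the HW register */

static void fake_w1c_write(uint32_t val)
{
        /* W1C semantics: writing 1 to a bit clears it, writing 0 leaves it alone */
        fake_automask_reg &= ~val;
}

static void ack_msix_entry(unsigned int entry)
{
        /* mirrors iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry)) */
        fake_w1c_write(1u << entry);
}
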
+ */ + iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry)); +} + static inline struct iwl_trans * iwl_trans_pcie_get_trans(struct iwl_trans_pcie *trans_pcie) { @@ -504,6 +681,11 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id); irqreturn_t iwl_pcie_irq_rx_msix_handler(int irq, void *dev_id); int iwl_pcie_rx_stop(struct iwl_trans *trans); void iwl_pcie_rx_free(struct iwl_trans *trans); +void iwl_pcie_free_rbs_pool(struct iwl_trans *trans); +void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq); +int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget); +void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority, + struct iwl_rxq *rxq); /***************************************************** * ICT - interrupt handling @@ -588,6 +770,60 @@ static inline void _iwl_disable_interrupts(struct iwl_trans *trans) IWL_DEBUG_ISR(trans, "Disabled interrupts\n"); } +#define IWL_NUM_OF_COMPLETION_RINGS 31 +#define IWL_NUM_OF_TRANSFER_RINGS 527 + +static inline int iwl_pcie_get_num_sections(const struct fw_img *fw, + int start) +{ + int i = 0; + + while (start < fw->num_sec && + fw->sec[start].offset != CPU1_CPU2_SEPARATOR_SECTION && + fw->sec[start].offset != PAGING_SEPARATOR_SECTION) { + start++; + i++; + } + + return i; +} + +static inline int iwl_pcie_ctxt_info_alloc_dma(struct iwl_trans *trans, + const struct fw_desc *sec, + struct iwl_dram_data *dram) +{ + dram->block = dma_alloc_coherent(trans->dev, sec->len, + &dram->physical, + GFP_KERNEL); + if (!dram->block) + return -ENOMEM; + + dram->size = sec->len; + memcpy(dram->block, sec->data, sec->len); + + return 0; +} + +static inline void iwl_pcie_ctxt_info_free_fw_img(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_self_init_dram *dram = &trans_pcie->init_dram; + int i; + + if (!dram->fw) { + WARN_ON(dram->fw_cnt); + return; + } + + for (i = 0; i < dram->fw_cnt; i++) + dma_free_coherent(trans->dev, dram->fw[i].size, + dram->fw[i].block, dram->fw[i].physical); + + kfree(dram->fw); + dram->fw_cnt = 0; + dram->fw = NULL; +} + static inline void iwl_disable_interrupts(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -660,7 +896,7 @@ static inline void iwl_enable_fw_load_int(struct iwl_trans *trans) } } -static inline u8 iwl_pcie_get_cmd_index(struct iwl_txq *q, u32 index) +static inline u16 iwl_pcie_get_cmd_index(const struct iwl_txq *q, u32 index) { return index & (q->n_window - 1); } @@ -676,6 +912,29 @@ static inline void *iwl_pcie_get_tfd(struct iwl_trans *trans, return txq->tfds + trans_pcie->tfd_size * idx; } +static inline const char *queue_name(struct device *dev, + struct iwl_trans_pcie *trans_p, int i) +{ + if (trans_p->shared_vec_mask) { + int vec = trans_p->shared_vec_mask & + IWL_SHARED_IRQ_FIRST_RSS ? 1 : 0; + + if (i == 0) + return DRV_NAME ": shared IRQ"; + + return devm_kasprintf(dev, GFP_KERNEL, + DRV_NAME ": queue %d", i + vec); + } + if (i == 0) + return DRV_NAME ": default queue"; + + if (i == trans_p->alloc_vecs - 1) + return DRV_NAME ": exception"; + + return devm_kasprintf(dev, GFP_KERNEL, + DRV_NAME ": queue %d", i); +} + static inline void iwl_enable_rfkill_int(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -730,9 +989,13 @@ static inline void iwl_stop_queue(struct iwl_trans *trans, static inline bool iwl_queue_used(const struct iwl_txq *q, int i) { - return q->write_ptr >= q->read_ptr ? 
- (i >= q->read_ptr && i < q->write_ptr) : - !(i < q->read_ptr && i >= q->write_ptr); + int index = iwl_pcie_get_cmd_index(q, i); + int r = iwl_pcie_get_cmd_index(q, q->read_ptr); + int w = iwl_pcie_get_cmd_index(q, q->write_ptr); + + return w >= r ? + (index >= r && index < w) : + !(index < r && index >= w); } static inline bool iwl_is_rfkill_set(struct iwl_trans *trans) @@ -801,7 +1064,7 @@ bool iwl_pcie_check_hw_rf_kill(struct iwl_trans *trans); void iwl_trans_pcie_handle_stop_rfkill(struct iwl_trans *trans, bool was_in_rfkill); void iwl_pcie_txq_free_tfd(struct iwl_trans *trans, struct iwl_txq *txq); -int iwl_queue_space(const struct iwl_txq *q); +int iwl_queue_space(struct iwl_trans *trans, const struct iwl_txq *q); void iwl_pcie_apm_stop_master(struct iwl_trans *trans); void iwl_pcie_conf_msix_hw(struct iwl_trans_pcie *trans_pcie); int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, @@ -818,6 +1081,9 @@ void iwl_pcie_free_tso_page(struct iwl_trans_pcie *trans_pcie, struct iwl_tso_hdr_page *get_page_hdr(struct iwl_trans *trans, size_t len); #endif +/* common functions that are used by gen3 transport */ +void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power); + /* transport gen 2 exported functions */ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans, const struct fw_img *fw, bool run_in_rfkill); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c index d15f5ba2dc77..d017aa2a0a8b 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/rx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/rx.c @@ -18,8 +18,7 @@ * more details. * * You should have received a copy of the GNU General Public License along with - * this program; if not, write to the Free Software Foundation, Inc., - * 51 Franklin Street, Fifth Floor, Boston, MA 02110, USA + * this program. * * The full GNU General Public License is included in this distribution in the * file called LICENSE. 
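
A standalone sketch of the wrap-aware "slot in use" test that the reworked iwl_queue_used() above performs on masked, window-relative indexes. The toy_queue structure and helper names are invented for illustration; only the index arithmetic follows the patch.

#include <stdbool.h>
#include <stdint.h>

struct toy_queue {
        uint32_t n_window;              /* power of two */
        uint32_t read_ptr;
        uint32_t write_ptr;
};

static uint32_t toy_idx(const struct toy_queue *q, uint32_t i)
{
        return i & (q->n_window - 1);   /* like iwl_pcie_get_cmd_index() */
}

static bool toy_queue_used(const struct toy_queue *q, uint32_t i)
{
        uint32_t index = toy_idx(q, i);
        uint32_t r = toy_idx(q, q->read_ptr);
        uint32_t w = toy_idx(q, q->write_ptr);

        /* non-wrapped case: r <= index < w; wrapped case: outside [w, r) */
        return w >= r ? (index >= r && index < w)
                      : !(index < r && index >= w);
}
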
@@ -37,6 +36,7 @@ #include "iwl-io.h" #include "internal.h" #include "iwl-op-mode.h" +#include "iwl-context-info-gen3.h" /****************************************************************************** * @@ -167,7 +167,12 @@ static inline __le32 iwl_pcie_dma_addr2rbd_ptr(dma_addr_t dma_addr) */ int iwl_pcie_rx_stop(struct iwl_trans *trans) { - if (trans->cfg->mq_rx_supported) { + if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) { + /* TODO: remove this for 22560 once fw does it */ + iwl_write_prph(trans, RFH_RXF_DMA_CFG_GEN3, 0); + return iwl_poll_prph_bit(trans, RFH_GEN_STATUS_GEN3, + RXF_DMA_IDLE, RXF_DMA_IDLE, 1000); + } else if (trans->cfg->mq_rx_supported) { iwl_write_prph(trans, RFH_RXF_DMA_CFG, 0); return iwl_poll_prph_bit(trans, RFH_GEN_STATUS, RXF_DMA_IDLE, RXF_DMA_IDLE, 1000); @@ -209,7 +214,11 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans, } rxq->write_actual = round_down(rxq->write, 8); - if (trans->cfg->mq_rx_supported) + if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) + iwl_write32(trans, HBUS_TARG_WRPTR, + (rxq->write_actual | + ((FIRST_RX_QUEUE + rxq->id) << 16))); + else if (trans->cfg->mq_rx_supported) iwl_write32(trans, RFH_Q_FRBDCB_WIDX_TRG(rxq->id), rxq->write_actual); else @@ -233,6 +242,25 @@ static void iwl_pcie_rxq_check_wrptr(struct iwl_trans *trans) } } +static void iwl_pcie_restock_bd(struct iwl_trans *trans, + struct iwl_rxq *rxq, + struct iwl_rx_mem_buffer *rxb) +{ + if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) { + struct iwl_rx_transfer_desc *bd = rxq->bd; + + bd[rxq->write].type_n_size = + cpu_to_le32((IWL_RX_TD_TYPE & IWL_RX_TD_TYPE_MSK) | + ((IWL_RX_TD_SIZE_2K >> 8) & IWL_RX_TD_SIZE_MSK)); + bd[rxq->write].addr = cpu_to_le64(rxb->page_dma); + bd[rxq->write].rbid = cpu_to_le16(rxb->vid); + } else { + __le64 *bd = rxq->bd; + + bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid); + } +} + /* * iwl_pcie_rxmq_restock - restock implementation for multi-queue rx */ @@ -254,8 +282,6 @@ static void iwl_pcie_rxmq_restock(struct iwl_trans *trans, spin_lock(&rxq->lock); while (rxq->free_count) { - __le64 *bd = (__le64 *)rxq->bd; - /* Get next free Rx buffer, remove from free list */ rxb = list_first_entry(&rxq->rx_free, struct iwl_rx_mem_buffer, list); @@ -264,7 +290,7 @@ static void iwl_pcie_rxmq_restock(struct iwl_trans *trans, /* 12 first bits are expected to be empty */ WARN_ON(rxb->page_dma & DMA_BIT_MASK(12)); /* Point to Rx buffer via next RBD in circular buffer */ - bd[rxq->write] = cpu_to_le64(rxb->page_dma | rxb->vid); + iwl_pcie_restock_bd(trans, rxq, rxb); rxq->write = (rxq->write + 1) & MQ_RX_TABLE_MASK; rxq->free_count--; } @@ -391,8 +417,8 @@ static struct page *iwl_pcie_rx_alloc_page(struct iwl_trans *trans, * iwl_pcie_rxq_restock. The latter function will update the HW to use the newly * allocated buffers. 
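
An illustration of the two bits of encoding introduced above for the 22560 RX path: the type_n_size word written into a transfer descriptor by iwl_pcie_restock_bd(), and the HBUS_TARG_WRPTR doorbell value that carries the queue number in its upper half. The constants mirror the patch; the helper names are hypothetical.

#include <stdint.h>

#define TOY_RX_TD_TYPE_MSK      0xff000000u
#define TOY_RX_TD_SIZE_MSK      0x00ffffffu
#define TOY_RX_TD_SIZE_2K       (1u << 11)
#define TOY_RX_TD_TYPE          0u
#define TOY_FIRST_RX_QUEUE      512u

static uint32_t toy_td_type_n_size(void)
{
        /* same expression as bd[rxq->write].type_n_size in iwl_pcie_restock_bd() */
        return (TOY_RX_TD_TYPE & TOY_RX_TD_TYPE_MSK) |
               ((TOY_RX_TD_SIZE_2K >> 8) & TOY_RX_TD_SIZE_MSK);
}

static uint32_t toy_rx_doorbell(uint32_t queue_id, uint32_t write_actual)
{
        /* same layout as the HBUS_TARG_WRPTR write in iwl_pcie_rxq_inc_wr_ptr() */
        return write_actual | ((TOY_FIRST_RX_QUEUE + queue_id) << 16);
}
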
*/ -static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority, - struct iwl_rxq *rxq) +void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority, + struct iwl_rxq *rxq) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rx_mem_buffer *rxb; @@ -448,7 +474,7 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority, } } -static void iwl_pcie_free_rbs_pool(struct iwl_trans *trans) +void iwl_pcie_free_rbs_pool(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int i; @@ -608,89 +634,174 @@ void iwl_pcie_rx_allocator_work(struct work_struct *data) iwl_pcie_rx_allocator(trans_pcie->trans); } -static int iwl_pcie_rx_alloc(struct iwl_trans *trans) +static int iwl_pcie_free_bd_size(struct iwl_trans *trans, bool use_rx_td) { - struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_rb_allocator *rba = &trans_pcie->rba; - struct device *dev = trans->dev; - int i; - int free_size = trans->cfg->mq_rx_supported ? sizeof(__le64) : - sizeof(__le32); + struct iwl_rx_transfer_desc *rx_td; - if (WARN_ON(trans_pcie->rxq)) - return -EINVAL; + if (use_rx_td) + return sizeof(*rx_td); + else + return trans->cfg->mq_rx_supported ? sizeof(__le64) : + sizeof(__le32); +} - trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq), - GFP_KERNEL); - if (!trans_pcie->rxq) - return -EINVAL; +static void iwl_pcie_free_rxq_dma(struct iwl_trans *trans, + struct iwl_rxq *rxq) +{ + struct device *dev = trans->dev; + bool use_rx_td = (trans->cfg->device_family >= + IWL_DEVICE_FAMILY_22560); + int free_size = iwl_pcie_free_bd_size(trans, use_rx_td); + + if (rxq->bd) + dma_free_coherent(trans->dev, + free_size * rxq->queue_size, + rxq->bd, rxq->bd_dma); + rxq->bd_dma = 0; + rxq->bd = NULL; + + if (rxq->rb_stts) + dma_free_coherent(trans->dev, + use_rx_td ? sizeof(__le16) : + sizeof(struct iwl_rb_status), + rxq->rb_stts, rxq->rb_stts_dma); + rxq->rb_stts_dma = 0; + rxq->rb_stts = NULL; + + if (rxq->used_bd) + dma_free_coherent(trans->dev, + (use_rx_td ? 
sizeof(*rxq->cd) : + sizeof(__le32)) * rxq->queue_size, + rxq->used_bd, rxq->used_bd_dma); + rxq->used_bd_dma = 0; + rxq->used_bd = NULL; + + if (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560) + return; - spin_lock_init(&rba->lock); + if (rxq->tr_tail) + dma_free_coherent(dev, sizeof(__le16), + rxq->tr_tail, rxq->tr_tail_dma); + rxq->tr_tail_dma = 0; + rxq->tr_tail = NULL; + + if (rxq->cr_tail) + dma_free_coherent(dev, sizeof(__le16), + rxq->cr_tail, rxq->cr_tail_dma); + rxq->cr_tail_dma = 0; + rxq->cr_tail = NULL; +} - for (i = 0; i < trans->num_rx_queues; i++) { - struct iwl_rxq *rxq = &trans_pcie->rxq[i]; +static int iwl_pcie_alloc_rxq_dma(struct iwl_trans *trans, + struct iwl_rxq *rxq) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct device *dev = trans->dev; + int i; + int free_size; + bool use_rx_td = (trans->cfg->device_family >= + IWL_DEVICE_FAMILY_22560); - spin_lock_init(&rxq->lock); - if (trans->cfg->mq_rx_supported) - rxq->queue_size = MQ_RX_TABLE_SIZE; - else - rxq->queue_size = RX_QUEUE_SIZE; + spin_lock_init(&rxq->lock); + if (trans->cfg->mq_rx_supported) + rxq->queue_size = MQ_RX_TABLE_SIZE; + else + rxq->queue_size = RX_QUEUE_SIZE; - /* - * Allocate the circular buffer of Read Buffer Descriptors - * (RBDs) - */ - rxq->bd = dma_zalloc_coherent(dev, - free_size * rxq->queue_size, - &rxq->bd_dma, GFP_KERNEL); - if (!rxq->bd) - goto err; + free_size = iwl_pcie_free_bd_size(trans, use_rx_td); - if (trans->cfg->mq_rx_supported) { - rxq->used_bd = dma_zalloc_coherent(dev, - sizeof(__le32) * - rxq->queue_size, - &rxq->used_bd_dma, - GFP_KERNEL); - if (!rxq->used_bd) - goto err; - } + /* + * Allocate the circular buffer of Read Buffer Descriptors + * (RBDs) + */ + rxq->bd = dma_zalloc_coherent(dev, + free_size * rxq->queue_size, + &rxq->bd_dma, GFP_KERNEL); + if (!rxq->bd) + goto err; - /*Allocate the driver's pointer to receive buffer status */ - rxq->rb_stts = dma_zalloc_coherent(dev, sizeof(*rxq->rb_stts), - &rxq->rb_stts_dma, + if (trans->cfg->mq_rx_supported) { + rxq->used_bd = dma_zalloc_coherent(dev, + (use_rx_td ? + sizeof(*rxq->cd) : + sizeof(__le32)) * + rxq->queue_size, + &rxq->used_bd_dma, GFP_KERNEL); - if (!rxq->rb_stts) + if (!rxq->used_bd) goto err; } + + /* Allocate the driver's pointer to receive buffer status */ + rxq->rb_stts = dma_zalloc_coherent(dev, use_rx_td ? 
+ sizeof(__le16) : + sizeof(struct iwl_rb_status), + &rxq->rb_stts_dma, + GFP_KERNEL); + if (!rxq->rb_stts) + goto err; + + if (!use_rx_td) + return 0; + + /* Allocate the driver's pointer to TR tail */ + rxq->tr_tail = dma_zalloc_coherent(dev, sizeof(__le16), + &rxq->tr_tail_dma, + GFP_KERNEL); + if (!rxq->tr_tail) + goto err; + + /* Allocate the driver's pointer to CR tail */ + rxq->cr_tail = dma_zalloc_coherent(dev, sizeof(__le16), + &rxq->cr_tail_dma, + GFP_KERNEL); + if (!rxq->cr_tail) + goto err; + /* + * W/A 22560 device step Z0 must be non zero bug + * TODO: remove this when stop supporting Z0 + */ + *rxq->cr_tail = cpu_to_le16(500); + return 0; err: for (i = 0; i < trans->num_rx_queues; i++) { struct iwl_rxq *rxq = &trans_pcie->rxq[i]; - if (rxq->bd) - dma_free_coherent(dev, free_size * rxq->queue_size, - rxq->bd, rxq->bd_dma); - rxq->bd_dma = 0; - rxq->bd = NULL; - - if (rxq->rb_stts) - dma_free_coherent(trans->dev, - sizeof(struct iwl_rb_status), - rxq->rb_stts, rxq->rb_stts_dma); - - if (rxq->used_bd) - dma_free_coherent(dev, sizeof(__le32) * rxq->queue_size, - rxq->used_bd, rxq->used_bd_dma); - rxq->used_bd_dma = 0; - rxq->used_bd = NULL; + iwl_pcie_free_rxq_dma(trans, rxq); } kfree(trans_pcie->rxq); return -ENOMEM; } +static int iwl_pcie_rx_alloc(struct iwl_trans *trans) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_rb_allocator *rba = &trans_pcie->rba; + int i, ret; + + if (WARN_ON(trans_pcie->rxq)) + return -EINVAL; + + trans_pcie->rxq = kcalloc(trans->num_rx_queues, sizeof(struct iwl_rxq), + GFP_KERNEL); + if (!trans_pcie->rxq) + return -EINVAL; + + spin_lock_init(&rba->lock); + + for (i = 0; i < trans->num_rx_queues; i++) { + struct iwl_rxq *rxq = &trans_pcie->rxq[i]; + + ret = iwl_pcie_alloc_rxq_dma(trans, rxq); + if (ret) + return ret; + } + return 0; +} + static void iwl_pcie_rx_hw_init(struct iwl_trans *trans, struct iwl_rxq *rxq) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -792,6 +903,9 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans) int i; switch (trans_pcie->rx_buf_size) { + case IWL_AMSDU_2K: + rb_size = RFH_RXF_DMA_RB_SIZE_2K; + break; case IWL_AMSDU_4K: rb_size = RFH_RXF_DMA_RB_SIZE_4K; break; @@ -872,7 +986,7 @@ static void iwl_pcie_rx_mq_hw_init(struct iwl_trans *trans) iwl_pcie_enable_rx_wake(trans, true); } -static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq) +void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq) { lockdep_assert_held(&rxq->lock); @@ -882,7 +996,7 @@ static void iwl_pcie_rx_init_rxb_lists(struct iwl_rxq *rxq) rxq->used_count = 0; } -static int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget) +int iwl_pcie_dummy_napi_poll(struct napi_struct *napi, int budget) { WARN_ON(1); return 0; @@ -931,7 +1045,9 @@ static int _iwl_pcie_rx_init(struct iwl_trans *trans) rxq->read = 0; rxq->write = 0; rxq->write_actual = 0; - memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts)); + memset(rxq->rb_stts, 0, + (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) ? + sizeof(__le16) : sizeof(struct iwl_rb_status)); iwl_pcie_rx_init_rxb_lists(rxq); @@ -1002,8 +1118,6 @@ void iwl_pcie_rx_free(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_rb_allocator *rba = &trans_pcie->rba; - int free_size = trans->cfg->mq_rx_supported ? 
sizeof(__le64) : - sizeof(__le32); int i; /* @@ -1022,27 +1136,7 @@ void iwl_pcie_rx_free(struct iwl_trans *trans) for (i = 0; i < trans->num_rx_queues; i++) { struct iwl_rxq *rxq = &trans_pcie->rxq[i]; - if (rxq->bd) - dma_free_coherent(trans->dev, - free_size * rxq->queue_size, - rxq->bd, rxq->bd_dma); - rxq->bd_dma = 0; - rxq->bd = NULL; - - if (rxq->rb_stts) - dma_free_coherent(trans->dev, - sizeof(struct iwl_rb_status), - rxq->rb_stts, rxq->rb_stts_dma); - else - IWL_DEBUG_INFO(trans, - "Free rxq->rb_stts which is NULL\n"); - - if (rxq->used_bd) - dma_free_coherent(trans->dev, - sizeof(__le32) * rxq->queue_size, - rxq->used_bd, rxq->used_bd_dma); - rxq->used_bd_dma = 0; - rxq->used_bd = NULL; + iwl_pcie_free_rxq_dma(trans, rxq); if (rxq->napi.poll) netif_napi_del(&rxq->napi); @@ -1202,6 +1296,8 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, } page_stolen |= rxcb._page_stolen; + if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) + break; offset += ALIGN(len, FH_RSCSR_FRAME_ALIGN); } @@ -1236,6 +1332,45 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans, iwl_pcie_rx_reuse_rbd(trans, rxb, rxq, emergency); } +static struct iwl_rx_mem_buffer *iwl_pcie_get_rxb(struct iwl_trans *trans, + struct iwl_rxq *rxq, int i) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + struct iwl_rx_mem_buffer *rxb; + u16 vid; + + if (!trans->cfg->mq_rx_supported) { + rxb = rxq->queue[i]; + rxq->queue[i] = NULL; + return rxb; + } + + /* used_bd is a 32/16 bit but only 12 are used to retrieve the vid */ + if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) + vid = le16_to_cpu(rxq->cd[i].rbid) & 0x0FFF; + else + vid = le32_to_cpu(rxq->bd_32[i]) & 0x0FFF; + + if (!vid || vid > ARRAY_SIZE(trans_pcie->global_table)) + goto out_err; + + rxb = trans_pcie->global_table[vid - 1]; + if (rxb->invalid) + goto out_err; + + if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) + rxb->size = le32_to_cpu(rxq->cd[i].size) & IWL_RX_CD_SIZE; + + rxb->invalid = true; + + return rxb; + +out_err: + WARN(1, "Invalid rxb from HW %u\n", (u32)vid); + iwl_force_nmi(trans); + return NULL; +} + /* * iwl_pcie_rx_handle - Main entry function for receiving responses from fw */ @@ -1250,7 +1385,7 @@ restart: spin_lock(&rxq->lock); /* uCode's read index (stored in shared DRAM) indicates the last Rx * buffer that the driver may process (last buffer filled by ucode). 
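
A toy version of the VID handling that iwl_pcie_get_rxb() above performs: the hardware returns either a 32-bit used-BD word or the 16-bit rbid of a completion descriptor, only the low 12 bits index the driver's global RXB table, and that index is 1-based. The table size and types here are stand-ins for illustration.

#include <stddef.h>
#include <stdint.h>

#define TOY_RX_POOL_SIZE 4096

struct toy_rxb {
        int invalid;
};

static struct toy_rxb *toy_table[TOY_RX_POOL_SIZE];

static struct toy_rxb *toy_lookup_rxb(uint32_t raw)
{
        uint16_t vid = raw & 0x0FFF;            /* only 12 bits carry the vid */

        if (!vid || vid > TOY_RX_POOL_SIZE)
                return NULL;                    /* driver WARNs and forces an NMI */

        return toy_table[vid - 1];              /* the table itself is 0-based */
}
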
*/ - r = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF; + r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF; i = rxq->read; /* W/A 9000 device step A0 wrap-around bug */ @@ -1266,30 +1401,9 @@ restart: if (unlikely(rxq->used_count == rxq->queue_size / 2)) emergency = true; - if (trans->cfg->mq_rx_supported) { - /* - * used_bd is a 32 bit but only 12 are used to retrieve - * the vid - */ - u16 vid = le32_to_cpu(rxq->used_bd[i]) & 0x0FFF; - - if (WARN(!vid || - vid > ARRAY_SIZE(trans_pcie->global_table), - "Invalid rxb index from HW %u\n", (u32)vid)) { - iwl_force_nmi(trans); - goto out; - } - rxb = trans_pcie->global_table[vid - 1]; - if (WARN(rxb->invalid, - "Invalid rxb from HW %u\n", (u32)vid)) { - iwl_force_nmi(trans); - goto out; - } - rxb->invalid = true; - } else { - rxb = rxq->queue[i]; - rxq->queue[i] = NULL; - } + rxb = iwl_pcie_get_rxb(trans, rxq, i); + if (!rxb) + goto out; IWL_DEBUG_RX(trans, "Q %d: HW = %d, SW = %d\n", rxq->id, r, i); iwl_pcie_rx_handle_rb(trans, rxq, rxb, emergency); @@ -1331,6 +1445,9 @@ restart: out: /* Backtrack one entry */ rxq->read = i; + /* update cr tail with the rxq read pointer */ + if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) + *rxq->cr_tail = cpu_to_le16(r); spin_unlock(&rxq->lock); /* @@ -1362,20 +1479,6 @@ static struct iwl_trans_pcie *iwl_pcie_get_trans_pcie(struct msix_entry *entry) return container_of(entries, struct iwl_trans_pcie, msix_entries[0]); } -static inline void iwl_pcie_clear_irq(struct iwl_trans *trans, - struct msix_entry *entry) -{ - /* - * Before sending the interrupt the HW disables it to prevent - * a nested interrupt. This is done by writing 1 to the corresponding - * bit in the mask register. After handling the interrupt, it should be - * re-enabled by clearing this bit. This register is defined as - * write 1 clear (W1C) register, meaning that it's being clear - * by writing 1 to the bit. - */ - iwl_write32(trans, CSR_MSIX_AUTOMASK_ST_AD, BIT(entry->entry)); -} - /* * iwl_pcie_rx_msix_handle - Main entry function for receiving responses from fw * This interrupt handler should be used with RSS queue only. @@ -1970,7 +2073,8 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id) /* Error detected by uCode */ if ((inta_fh & MSIX_FH_INT_CAUSES_FH_ERR) || - (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR)) { + (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR) || + (inta_hw & MSIX_HW_INT_CAUSES_REG_SW_ERR_V2)) { IWL_ERR(trans, "Microcode SW error detected. 
Restarting 0x%X.\n", inta_fh); @@ -1995,8 +2099,18 @@ irqreturn_t iwl_pcie_irq_msix_handler(int irq, void *dev_id) } } - /* uCode wakes up after power-down sleep */ - if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) { + if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560 && + inta_hw & MSIX_HW_INT_CAUSES_REG_IPC) { + /* Reflect IML transfer status */ + int res = iwl_read32(trans, CSR_IML_RESP_ADDR); + + IWL_DEBUG_ISR(trans, "IML transfer status: %d\n", res); + if (res == IWL_IMAGE_RESP_FAIL) { + isr_stats->sw++; + iwl_pcie_irq_handle_error(trans); + } + } else if (inta_hw & MSIX_HW_INT_CAUSES_REG_WAKEUP) { + /* uCode wakes up after power-down sleep */ IWL_DEBUG_ISR(trans, "Wakeup interrupt\n"); iwl_pcie_rxq_check_wrptr(trans); iwl_pcie_txq_check_wrptrs(trans); diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c index b8e8dac2895d..2bc67219ed3e 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans-gen2.c @@ -53,6 +53,7 @@ #include "iwl-trans.h" #include "iwl-prph.h" #include "iwl-context-info.h" +#include "iwl-context-info-gen3.h" #include "internal.h" /* @@ -188,7 +189,10 @@ void _iwl_trans_pcie_gen2_stop_device(struct iwl_trans *trans, bool low_power) } iwl_pcie_ctxt_info_free_paging(trans); - iwl_pcie_ctxt_info_free(trans); + if (trans->cfg->device_family == IWL_DEVICE_FAMILY_22560) + iwl_pcie_ctxt_info_gen3_free(trans); + else + iwl_pcie_ctxt_info_free(trans); /* Make sure (redundant) we've released our request to stay awake */ iwl_clear_bit(trans, CSR_GP_CNTRL, @@ -346,7 +350,10 @@ int iwl_trans_pcie_gen2_start_fw(struct iwl_trans *trans, goto out; } - ret = iwl_pcie_ctxt_info_init(trans, fw); + if (trans->cfg->device_family == IWL_DEVICE_FAMILY_22560) + ret = iwl_pcie_ctxt_info_gen3_init(trans, fw); + else + ret = iwl_pcie_ctxt_info_init(trans, fw); if (ret) goto out; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c index 7229991ae70d..7d319b6863fe 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/trans.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/trans.c @@ -84,6 +84,7 @@ #include "iwl-scd.h" #include "iwl-agn-hw.h" #include "fw/error-dump.h" +#include "fw/dbg.h" #include "internal.h" #include "iwl-fh.h" @@ -203,7 +204,7 @@ static void iwl_pcie_free_fw_monitor(struct iwl_trans *trans) trans_pcie->fw_mon_size = 0; } -static void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power) +void iwl_pcie_alloc_fw_monitor(struct iwl_trans *trans, u8 max_power) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct page *page = NULL; @@ -1132,21 +1133,44 @@ static struct iwl_causes_list causes_list[] = { {MSIX_HW_INT_CAUSES_REG_HAP, CSR_MSIX_HW_INT_MASK_AD, 0x2E}, }; +static struct iwl_causes_list causes_list_v2[] = { + {MSIX_FH_INT_CAUSES_D2S_CH0_NUM, CSR_MSIX_FH_INT_MASK_AD, 0}, + {MSIX_FH_INT_CAUSES_D2S_CH1_NUM, CSR_MSIX_FH_INT_MASK_AD, 0x1}, + {MSIX_FH_INT_CAUSES_S2D, CSR_MSIX_FH_INT_MASK_AD, 0x3}, + {MSIX_FH_INT_CAUSES_FH_ERR, CSR_MSIX_FH_INT_MASK_AD, 0x5}, + {MSIX_HW_INT_CAUSES_REG_ALIVE, CSR_MSIX_HW_INT_MASK_AD, 0x10}, + {MSIX_HW_INT_CAUSES_REG_IPC, CSR_MSIX_HW_INT_MASK_AD, 0x11}, + {MSIX_HW_INT_CAUSES_REG_SW_ERR_V2, CSR_MSIX_HW_INT_MASK_AD, 0x15}, + {MSIX_HW_INT_CAUSES_REG_CT_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x16}, + {MSIX_HW_INT_CAUSES_REG_RF_KILL, CSR_MSIX_HW_INT_MASK_AD, 0x17}, + {MSIX_HW_INT_CAUSES_REG_PERIODIC, CSR_MSIX_HW_INT_MASK_AD, 0x18}, + 
{MSIX_HW_INT_CAUSES_REG_SCD, CSR_MSIX_HW_INT_MASK_AD, 0x2A}, + {MSIX_HW_INT_CAUSES_REG_FH_TX, CSR_MSIX_HW_INT_MASK_AD, 0x2B}, + {MSIX_HW_INT_CAUSES_REG_HW_ERR, CSR_MSIX_HW_INT_MASK_AD, 0x2D}, + {MSIX_HW_INT_CAUSES_REG_HAP, CSR_MSIX_HW_INT_MASK_AD, 0x2E}, +}; + static void iwl_pcie_map_non_rx_causes(struct iwl_trans *trans) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); int val = trans_pcie->def_irq | MSIX_NON_AUTO_CLEAR_CAUSE; - int i; + int i, arr_size = + (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560) ? + ARRAY_SIZE(causes_list) : ARRAY_SIZE(causes_list_v2); /* * Access all non RX causes and map them to the default irq. * In case we are missing at least one interrupt vector, * the first interrupt vector will serve non-RX and FBQ causes. */ - for (i = 0; i < ARRAY_SIZE(causes_list); i++) { - iwl_write8(trans, CSR_MSIX_IVAR(causes_list[i].addr), val); - iwl_clear_bit(trans, causes_list[i].mask_reg, - causes_list[i].cause_num); + for (i = 0; i < arr_size; i++) { + struct iwl_causes_list *causes = + (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560) ? + causes_list : causes_list_v2; + + iwl_write8(trans, CSR_MSIX_IVAR(causes[i].addr), val); + iwl_clear_bit(trans, causes[i].mask_reg, + causes[i].cause_num); } } @@ -1539,18 +1563,6 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, iwl_pcie_enable_rx_wake(trans, true); - /* - * Reconfigure IVAR table in case of MSIX or reset ict table in - * MSI mode since HW reset erased it. - * Also enables interrupts - none will happen as - * the device doesn't know we're waking it up, only when - * the opmode actually tells it after this call. - */ - iwl_pcie_conf_msix_hw(trans_pcie); - if (!trans_pcie->msix_enabled) - iwl_pcie_reset_ict(trans); - iwl_enable_interrupts(trans); - iwl_set_bit(trans, CSR_GP_CNTRL, BIT(trans->cfg->csr->flag_mac_access_req)); iwl_set_bit(trans, CSR_GP_CNTRL, @@ -1568,6 +1580,18 @@ static int iwl_trans_pcie_d3_resume(struct iwl_trans *trans, return ret; } + /* + * Reconfigure IVAR table in case of MSIX or reset ict table in + * MSI mode since HW reset erased it. + * Also enables interrupts - none will happen as + * the device doesn't know we're waking it up, only when + * the opmode actually tells it after this call. + */ + iwl_pcie_conf_msix_hw(trans_pcie); + if (!trans_pcie->msix_enabled) + iwl_pcie_reset_ict(trans); + iwl_enable_interrupts(trans); + iwl_pcie_set_pwr(trans, false); if (!reset) { @@ -1685,29 +1709,6 @@ static void iwl_pcie_irq_set_affinity(struct iwl_trans *trans) } } -static const char *queue_name(struct device *dev, - struct iwl_trans_pcie *trans_p, int i) -{ - if (trans_p->shared_vec_mask) { - int vec = trans_p->shared_vec_mask & - IWL_SHARED_IRQ_FIRST_RSS ? 
1 : 0; - - if (i == 0) - return DRV_NAME ": shared IRQ"; - - return devm_kasprintf(dev, GFP_KERNEL, - DRV_NAME ": queue %d", i + vec); - } - if (i == 0) - return DRV_NAME ": default queue"; - - if (i == trans_p->alloc_vecs - 1) - return DRV_NAME ": exception"; - - return devm_kasprintf(dev, GFP_KERNEL, - DRV_NAME ": queue %d", i); -} - static int iwl_pcie_init_msix_handler(struct pci_dev *pdev, struct iwl_trans_pcie *trans_pcie) { @@ -2236,12 +2237,28 @@ void iwl_trans_pcie_log_scd_error(struct iwl_trans *trans, struct iwl_txq *txq) jiffies_to_msecs(txq->wd_timeout), txq->read_ptr, txq->write_ptr, iwl_read_prph(trans, SCD_QUEUE_RDPTR(txq_id)) & - (TFD_QUEUE_SIZE_MAX - 1), + (trans->cfg->base_params->max_tfd_queue_size - 1), iwl_read_prph(trans, SCD_QUEUE_WRPTR(txq_id)) & - (TFD_QUEUE_SIZE_MAX - 1), + (trans->cfg->base_params->max_tfd_queue_size - 1), iwl_read_direct32(trans, FH_TX_TRB_REG(fifo))); } +static int iwl_trans_pcie_rxq_dma_data(struct iwl_trans *trans, int queue, + struct iwl_trans_rxq_dma_data *data) +{ + struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + + if (queue >= trans->num_rx_queues || !trans_pcie->rxq) + return -EINVAL; + + data->fr_bd_cb = trans_pcie->rxq[queue].bd_dma; + data->urbd_stts_wrptr = trans_pcie->rxq[queue].rb_stts_dma; + data->ur_bd_cb = trans_pcie->rxq[queue].used_bd_dma; + data->fr_bd_wid = 0; + + return 0; +} + static int iwl_trans_pcie_wait_txq_empty(struct iwl_trans *trans, int txq_idx) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); @@ -2522,10 +2539,11 @@ static ssize_t iwl_dbgfs_rx_queue_read(struct file *file, pos += scnprintf(buf + pos, bufsz - pos, "\tfree_count: %u\n", rxq->free_count); if (rxq->rb_stts) { + u32 r = __le16_to_cpu(iwl_get_closed_rb_stts(trans, + rxq)); pos += scnprintf(buf + pos, bufsz - pos, "\tclosed_rb_num: %u\n", - le16_to_cpu(rxq->rb_stts->closed_rb_num) & - 0x0FFF); + r & 0x0FFF); } else { pos += scnprintf(buf + pos, bufsz - pos, "\tclosed_rb_num: Not Allocated\n"); @@ -2731,7 +2749,7 @@ static u32 iwl_trans_pcie_dump_rbs(struct iwl_trans *trans, spin_lock(&rxq->lock); - r = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num)) & 0x0FFF; + r = le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) & 0x0FFF; for (i = rxq->read, j = 0; i != r && j < allocated_rb_nums; @@ -2934,11 +2952,12 @@ static struct iwl_trans_dump_data struct iwl_txq *cmdq = trans_pcie->txq[trans_pcie->cmd_queue]; struct iwl_fw_error_dump_txcmd *txcmd; struct iwl_trans_dump_data *dump_data; - u32 len, num_rbs; + u32 len, num_rbs = 0; u32 monitor_len; int i, ptr; bool dump_rbs = test_bit(STATUS_FW_ERROR, &trans->status) && - !trans->cfg->mq_rx_supported; + !trans->cfg->mq_rx_supported && + trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_RB); /* transport dump header */ len = sizeof(*dump_data); @@ -2990,6 +3009,10 @@ static struct iwl_trans_dump_data } if (trigger && (trigger->mode & IWL_FW_DBG_TRIGGER_MONITOR_ONLY)) { + if (!(trans->dbg_dump_mask & + BIT(IWL_FW_ERROR_DUMP_FW_MONITOR))) + return NULL; + dump_data = vzalloc(len); if (!dump_data) return NULL; @@ -3002,22 +3025,28 @@ static struct iwl_trans_dump_data } /* CSR registers */ - len += sizeof(*data) + IWL_CSR_TO_DUMP; + if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR)) + len += sizeof(*data) + IWL_CSR_TO_DUMP; /* FH registers */ - if (trans->cfg->gen2) - len += sizeof(*data) + - (FH_MEM_UPPER_BOUND_GEN2 - FH_MEM_LOWER_BOUND_GEN2); - else - len += sizeof(*data) + - (FH_MEM_UPPER_BOUND - FH_MEM_LOWER_BOUND); + if (trans->dbg_dump_mask & 
BIT(IWL_FW_ERROR_DUMP_FH_REGS)) { + if (trans->cfg->gen2) + len += sizeof(*data) + + (FH_MEM_UPPER_BOUND_GEN2 - + FH_MEM_LOWER_BOUND_GEN2); + else + len += sizeof(*data) + + (FH_MEM_UPPER_BOUND - + FH_MEM_LOWER_BOUND); + } if (dump_rbs) { /* Dump RBs is supported only for pre-9000 devices (1 queue) */ struct iwl_rxq *rxq = &trans_pcie->rxq[0]; /* RBs */ - num_rbs = le16_to_cpu(READ_ONCE(rxq->rb_stts->closed_rb_num)) - & 0x0FFF; + num_rbs = + le16_to_cpu(iwl_get_closed_rb_stts(trans, rxq)) + & 0x0FFF; num_rbs = (num_rbs - rxq->read) & RX_QUEUE_MASK; len += num_rbs * (sizeof(*data) + sizeof(struct iwl_fw_error_dump_rb) + @@ -3025,7 +3054,8 @@ static struct iwl_trans_dump_data } /* Paged memory for gen2 HW */ - if (trans->cfg->gen2) + if (trans->cfg->gen2 && + trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) for (i = 0; i < trans_pcie->init_dram.paging_cnt; i++) len += sizeof(*data) + sizeof(struct iwl_fw_error_dump_paging) + @@ -3037,41 +3067,51 @@ static struct iwl_trans_dump_data len = 0; data = (void *)dump_data->data; - data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD); - txcmd = (void *)data->data; - spin_lock_bh(&cmdq->lock); - ptr = cmdq->write_ptr; - for (i = 0; i < cmdq->n_window; i++) { - u8 idx = iwl_pcie_get_cmd_index(cmdq, ptr); - u32 caplen, cmdlen; - - cmdlen = iwl_trans_pcie_get_cmdlen(trans, cmdq->tfds + - trans_pcie->tfd_size * ptr); - caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen); - - if (cmdlen) { - len += sizeof(*txcmd) + caplen; - txcmd->cmdlen = cpu_to_le32(cmdlen); - txcmd->caplen = cpu_to_le32(caplen); - memcpy(txcmd->data, cmdq->entries[idx].cmd, caplen); - txcmd = (void *)((u8 *)txcmd->data + caplen); + + if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_TXCMD)) { + u16 tfd_size = trans_pcie->tfd_size; + + data->type = cpu_to_le32(IWL_FW_ERROR_DUMP_TXCMD); + txcmd = (void *)data->data; + spin_lock_bh(&cmdq->lock); + ptr = cmdq->write_ptr; + for (i = 0; i < cmdq->n_window; i++) { + u8 idx = iwl_pcie_get_cmd_index(cmdq, ptr); + u32 caplen, cmdlen; + + cmdlen = iwl_trans_pcie_get_cmdlen(trans, + cmdq->tfds + + tfd_size * ptr); + caplen = min_t(u32, TFD_MAX_PAYLOAD_SIZE, cmdlen); + + if (cmdlen) { + len += sizeof(*txcmd) + caplen; + txcmd->cmdlen = cpu_to_le32(cmdlen); + txcmd->caplen = cpu_to_le32(caplen); + memcpy(txcmd->data, cmdq->entries[idx].cmd, + caplen); + txcmd = (void *)((u8 *)txcmd->data + caplen); + } + + ptr = iwl_queue_dec_wrap(trans, ptr); } + spin_unlock_bh(&cmdq->lock); - ptr = iwl_queue_dec_wrap(ptr); + data->len = cpu_to_le32(len); + len += sizeof(*data); + data = iwl_fw_error_next_data(data); } - spin_unlock_bh(&cmdq->lock); - data->len = cpu_to_le32(len); - len += sizeof(*data); - data = iwl_fw_error_next_data(data); - - len += iwl_trans_pcie_dump_csr(trans, &data); - len += iwl_trans_pcie_fh_regs_dump(trans, &data); + if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_CSR)) + len += iwl_trans_pcie_dump_csr(trans, &data); + if (trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_FH_REGS)) + len += iwl_trans_pcie_fh_regs_dump(trans, &data); if (dump_rbs) len += iwl_trans_pcie_dump_rbs(trans, &data, num_rbs); /* Paged memory for gen2 HW */ - if (trans->cfg->gen2) { + if (trans->cfg->gen2 && + trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_PAGING)) { for (i = 0; i < trans_pcie->init_dram.paging_cnt; i++) { struct iwl_fw_error_dump_paging *paging; dma_addr_t addr = @@ -3091,8 +3131,8 @@ static struct iwl_trans_dump_data len += sizeof(*data) + sizeof(*paging) + page_len; } } - - len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len); + if 
(trans->dbg_dump_mask & BIT(IWL_FW_ERROR_DUMP_FW_MONITOR)) + len += iwl_trans_pcie_dump_monitor(trans, &data, monitor_len); dump_data->len = len; @@ -3187,6 +3227,7 @@ static const struct iwl_trans_ops trans_ops_pcie_gen2 = { .txq_alloc = iwl_trans_pcie_dyn_txq_alloc, .txq_free = iwl_trans_pcie_dyn_txq_free, .wait_txq_empty = iwl_trans_pcie_wait_txq_empty, + .rxq_dma_data = iwl_trans_pcie_rxq_dma_data, }; struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, @@ -3349,14 +3390,26 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev, #if IS_ENABLED(CONFIG_IWLMVM) trans->hw_rf_id = iwl_read32(trans, CSR_HW_RF_ID); - if (trans->hw_rf_id == CSR_HW_RF_ID_TYPE_HR) { + + if (CSR_HW_RF_ID_TYPE_CHIP_ID(trans->hw_rf_id) == + CSR_HW_RF_ID_TYPE_CHIP_ID(CSR_HW_RF_ID_TYPE_HR)) { u32 hw_status; hw_status = iwl_read_prph(trans, UMAG_GEN_HW_STATUS); - if (hw_status & UMAG_GEN_HW_IS_FPGA) - trans->cfg = &iwl22000_2ax_cfg_qnj_hr_f0; - else + if (CSR_HW_RF_STEP(trans->hw_rf_id) == SILICON_B_STEP) + /* + * b step fw is the same for physical card and fpga + */ + trans->cfg = &iwl22000_2ax_cfg_qnj_hr_b0; + else if ((hw_status & UMAG_GEN_HW_IS_FPGA) && + CSR_HW_RF_STEP(trans->hw_rf_id) == SILICON_A_STEP) { + trans->cfg = &iwl22000_2ax_cfg_qnj_hr_a0_f0; + } else { + /* + * a step no FPGA + */ trans->cfg = &iwl22000_2ac_cfg_hr; + } } #endif diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c index 48890a1c825f..b99f33ff9123 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx-gen2.c @@ -6,6 +6,7 @@ * GPL LICENSE SUMMARY * * Copyright(c) 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * * This program is free software; you can redistribute it and/or modify * it under the terms of version 2 of the GNU General Public License as @@ -19,6 +20,7 @@ * BSD LICENSE * * Copyright(c) 2017 Intel Deutschland GmbH + * Copyright(c) 2018 Intel Corporation * All rights reserved. 
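
A small sketch of the dump-mask gating pattern used throughout the dump path above: a section only contributes to the dump length, and is only emitted, when its bit is set in trans->dbg_dump_mask. The section IDs and lengths below are invented for illustration.

#include <stddef.h>
#include <stdint.h>

enum toy_dump_section { TOY_DUMP_CSR, TOY_DUMP_FH_REGS, TOY_DUMP_PAGING };

static size_t toy_dump_len(uint32_t dump_mask, size_t csr_len,
                           size_t fh_len, size_t paging_len)
{
        size_t len = 0;

        if (dump_mask & (1u << TOY_DUMP_CSR))
                len += csr_len;
        if (dump_mask & (1u << TOY_DUMP_FH_REGS))
                len += fh_len;
        if (dump_mask & (1u << TOY_DUMP_PAGING))
                len += paging_len;

        return len;             /* sections with a cleared bit cost nothing */
}
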
* * Redistribution and use in source and binary forms, with or without @@ -50,6 +52,7 @@ *****************************************************************************/ #include <linux/pm_runtime.h> #include <net/tso.h> +#include <linux/tcp.h> #include "iwl-debug.h" #include "iwl-csr.h" @@ -84,16 +87,20 @@ void iwl_pcie_gen2_tx_stop(struct iwl_trans *trans) /* * iwl_pcie_txq_update_byte_tbl - Set up entry in Tx byte-count array */ -static void iwl_pcie_gen2_update_byte_tbl(struct iwl_txq *txq, u16 byte_cnt, +static void iwl_pcie_gen2_update_byte_tbl(struct iwl_trans_pcie *trans_pcie, + struct iwl_txq *txq, u16 byte_cnt, int num_tbs) { struct iwlagn_scd_bc_tbl *scd_bc_tbl = txq->bc_tbl.addr; + struct iwl_trans *trans = iwl_trans_pcie_get_trans(trans_pcie); + struct iwl_gen3_bc_tbl *scd_bc_tbl_gen3 = txq->bc_tbl.addr; int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr); u8 filled_tfd_size, num_fetch_chunks; u16 len = byte_cnt; __le16 bc_ent; - len = DIV_ROUND_UP(len, 4); + if (trans_pcie->bc_table_dword) + len = DIV_ROUND_UP(len, 4); if (WARN_ON(len > 0xFFF || idx >= txq->n_window)) return; @@ -111,7 +118,10 @@ static void iwl_pcie_gen2_update_byte_tbl(struct iwl_txq *txq, u16 byte_cnt, num_fetch_chunks = DIV_ROUND_UP(filled_tfd_size, 64) - 1; bc_ent = cpu_to_le16(len | (num_fetch_chunks << 12)); - scd_bc_tbl->tfd_offset[idx] = bc_ent; + if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) + scd_bc_tbl_gen3->tfd_offset[idx] = bc_ent; + else + scd_bc_tbl->tfd_offset[idx] = bc_ent; } /* @@ -355,52 +365,89 @@ out_err: return -EINVAL; } -static -struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans, - struct iwl_txq *txq, - struct iwl_device_cmd *dev_cmd, - struct sk_buff *skb, - struct iwl_cmd_meta *out_meta) +static struct +iwl_tfh_tfd *iwl_pcie_gen2_build_tx_amsdu(struct iwl_trans *trans, + struct iwl_txq *txq, + struct iwl_device_cmd *dev_cmd, + struct sk_buff *skb, + struct iwl_cmd_meta *out_meta, + int hdr_len, + int tx_cmd_len) { - struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr); struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx); dma_addr_t tb_phys; - bool amsdu; - int i, len, tb1_len, tb2_len, hdr_len; + int len; void *tb1_addr; - memset(tfd, 0, sizeof(*tfd)); + tb_phys = iwl_pcie_get_first_tb_dma(txq, idx); - amsdu = ieee80211_is_data_qos(hdr->frame_control) && - (*ieee80211_get_qos_ctl(hdr) & - IEEE80211_QOS_CTL_A_MSDU_PRESENT); + iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE); + + /* + * The second TB (tb1) points to the remainder of the TX command + * and the 802.11 header - dword aligned size + * (This calculation modifies the TX command, so do it before the + * setup of the first TB) + */ + len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len - + IWL_FIRST_TB_SIZE; + + /* do not align A-MSDU to dword as the subframe header aligns it */ + + /* map the data for TB1 */ + tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE; + tb_phys = dma_map_single(trans->dev, tb1_addr, len, DMA_TO_DEVICE); + if (unlikely(dma_mapping_error(trans->dev, tb_phys))) + goto out_err; + iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, len); + + if (iwl_pcie_gen2_build_amsdu(trans, skb, tfd, + len + IWL_FIRST_TB_SIZE, + hdr_len, dev_cmd)) + goto out_err; + + /* building the A-MSDU might have changed this data, memcpy it now */ + memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr, IWL_FIRST_TB_SIZE); + return tfd; + +out_err: + iwl_pcie_gen2_tfd_unmap(trans, out_meta, tfd); + return NULL; +} + +static 
struct +iwl_tfh_tfd *iwl_pcie_gen2_build_tx(struct iwl_trans *trans, + struct iwl_txq *txq, + struct iwl_device_cmd *dev_cmd, + struct sk_buff *skb, + struct iwl_cmd_meta *out_meta, + int hdr_len, + int tx_cmd_len) +{ + int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr); + struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx); + dma_addr_t tb_phys; + int i, len, tb1_len, tb2_len; + void *tb1_addr; tb_phys = iwl_pcie_get_first_tb_dma(txq, idx); + /* The first TB points to bi-directional DMA data */ - if (!amsdu) - memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr, - IWL_FIRST_TB_SIZE); + memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr, IWL_FIRST_TB_SIZE); iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, IWL_FIRST_TB_SIZE); - /* there must be data left over for TB1 or this code must be changed */ - BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE); - /* * The second TB (tb1) points to the remainder of the TX command * and the 802.11 header - dword aligned size * (This calculation modifies the TX command, so do it before the * setup of the first TB) */ - len = sizeof(struct iwl_tx_cmd_gen2) + sizeof(struct iwl_cmd_header) + - ieee80211_hdrlen(hdr->frame_control) - IWL_FIRST_TB_SIZE; + len = tx_cmd_len + sizeof(struct iwl_cmd_header) + hdr_len - + IWL_FIRST_TB_SIZE; - /* do not align A-MSDU to dword as the subframe header aligns it */ - if (amsdu) - tb1_len = len; - else - tb1_len = ALIGN(len, 4); + tb1_len = ALIGN(len, 4); /* map the data for TB1 */ tb1_addr = ((u8 *)&dev_cmd->hdr) + IWL_FIRST_TB_SIZE; @@ -409,23 +456,6 @@ struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans, goto out_err; iwl_pcie_gen2_set_tb(trans, tfd, tb_phys, tb1_len); - hdr_len = ieee80211_hdrlen(hdr->frame_control); - - if (amsdu) { - if (iwl_pcie_gen2_build_amsdu(trans, skb, tfd, - tb1_len + IWL_FIRST_TB_SIZE, - hdr_len, dev_cmd)) - goto out_err; - - /* - * building the A-MSDU might have changed this data, so memcpy - * it now - */ - memcpy(&txq->first_tb_bufs[idx], &dev_cmd->hdr, - IWL_FIRST_TB_SIZE); - return tfd; - } - /* set up TFD's third entry to point to remainder of skb's head */ tb2_len = skb_headlen(skb) - hdr_len; @@ -467,13 +497,50 @@ out_err: return NULL; } +static +struct iwl_tfh_tfd *iwl_pcie_gen2_build_tfd(struct iwl_trans *trans, + struct iwl_txq *txq, + struct iwl_device_cmd *dev_cmd, + struct sk_buff *skb, + struct iwl_cmd_meta *out_meta) +{ + struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)skb->data; + int idx = iwl_pcie_get_cmd_index(txq, txq->write_ptr); + struct iwl_tfh_tfd *tfd = iwl_pcie_get_tfd(trans, txq, idx); + int len, hdr_len; + bool amsdu; + + /* There must be data left over for TB1 or this code must be changed */ + BUILD_BUG_ON(sizeof(struct iwl_tx_cmd_gen2) < IWL_FIRST_TB_SIZE); + + memset(tfd, 0, sizeof(*tfd)); + + if (trans->cfg->device_family < IWL_DEVICE_FAMILY_22560) + len = sizeof(struct iwl_tx_cmd_gen2); + else + len = sizeof(struct iwl_tx_cmd_gen3); + + amsdu = ieee80211_is_data_qos(hdr->frame_control) && + (*ieee80211_get_qos_ctl(hdr) & + IEEE80211_QOS_CTL_A_MSDU_PRESENT); + + hdr_len = ieee80211_hdrlen(hdr->frame_control); + + if (amsdu) + return iwl_pcie_gen2_build_tx_amsdu(trans, txq, dev_cmd, skb, + out_meta, hdr_len, len); + + return iwl_pcie_gen2_build_tx(trans, txq, dev_cmd, skb, out_meta, + hdr_len, len); +} + int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, struct iwl_device_cmd *dev_cmd, int txq_id) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - struct iwl_tx_cmd_gen2 *tx_cmd = (void 
*)dev_cmd->payload; struct iwl_cmd_meta *out_meta; struct iwl_txq *txq = trans_pcie->txq[txq_id]; + u16 cmd_len; int idx; void *tfd; @@ -488,11 +555,23 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, spin_lock(&txq->lock); - if (iwl_queue_space(txq) < txq->high_mark) { + if (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) { + struct iwl_tx_cmd_gen3 *tx_cmd_gen3 = + (void *)dev_cmd->payload; + + cmd_len = le16_to_cpu(tx_cmd_gen3->len); + } else { + struct iwl_tx_cmd_gen2 *tx_cmd_gen2 = + (void *)dev_cmd->payload; + + cmd_len = le16_to_cpu(tx_cmd_gen2->len); + } + + if (iwl_queue_space(trans, txq) < txq->high_mark) { iwl_stop_queue(trans, txq); /* don't put the packet on the ring, if there is no room */ - if (unlikely(iwl_queue_space(txq) < 3)) { + if (unlikely(iwl_queue_space(trans, txq) < 3)) { struct iwl_device_cmd **dev_cmd_ptr; dev_cmd_ptr = (void *)((u8 *)skb->cb + @@ -526,7 +605,7 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, } /* Set up entry for this TFD in Tx byte-count array */ - iwl_pcie_gen2_update_byte_tbl(txq, le16_to_cpu(tx_cmd->len), + iwl_pcie_gen2_update_byte_tbl(trans_pcie, txq, cmd_len, iwl_pcie_gen2_get_num_tbs(trans, tfd)); /* start timer if queue currently empty */ @@ -538,7 +617,7 @@ int iwl_trans_pcie_gen2_tx(struct iwl_trans *trans, struct sk_buff *skb, } /* Tell device the write index *just past* this latest filled TFD */ - txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr); + txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr); iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq); /* * At this point the frame is "transmitted" successfully @@ -650,7 +729,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans, tfd = iwl_pcie_get_tfd(trans, txq, txq->write_ptr); memset(tfd, 0, sizeof(*tfd)); - if (iwl_queue_space(txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) { + if (iwl_queue_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) { spin_unlock_bh(&txq->lock); IWL_ERR(trans, "No space in command queue\n"); @@ -787,7 +866,7 @@ static int iwl_pcie_gen2_enqueue_hcmd(struct iwl_trans *trans, iwl_trans_ref(trans); } /* Increment and update queue's write index */ - txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr); + txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr); iwl_pcie_gen2_txq_inc_wr_ptr(trans, txq); spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); @@ -954,7 +1033,7 @@ void iwl_pcie_gen2_txq_unmap(struct iwl_trans *trans, int txq_id) iwl_pcie_free_tso_page(trans_pcie, skb); } iwl_pcie_gen2_free_tfd(trans, txq); - txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr); + txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr); if (txq->read_ptr == txq->write_ptr) { unsigned long flags; @@ -1062,6 +1141,9 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans, if (!txq) return -ENOMEM; ret = iwl_pcie_alloc_dma_ptr(trans, &txq->bc_tbl, + (trans->cfg->device_family >= + IWL_DEVICE_FAMILY_22560) ? 
+ sizeof(struct iwl_gen3_bc_tbl) : sizeof(struct iwlagn_scd_bc_tbl)); if (ret) { IWL_ERR(trans, "Scheduler BC Table allocation failed\n"); @@ -1113,7 +1195,7 @@ int iwl_trans_pcie_dyn_txq_alloc(struct iwl_trans *trans, txq->id = qid; trans_pcie->txq[qid] = txq; - wr_ptr &= (TFD_QUEUE_SIZE_MAX - 1); + wr_ptr &= (trans->cfg->base_params->max_tfd_queue_size - 1); /* Place first TFD at index corresponding to start sequence number */ txq->read_ptr = wr_ptr; diff --git a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c index 473fe7ccb07c..93f0d387688a 100644 --- a/drivers/net/wireless/intel/iwlwifi/pcie/tx.c +++ b/drivers/net/wireless/intel/iwlwifi/pcie/tx.c @@ -71,27 +71,28 @@ * ***************************************************/ -int iwl_queue_space(const struct iwl_txq *q) +int iwl_queue_space(struct iwl_trans *trans, const struct iwl_txq *q) { unsigned int max; unsigned int used; /* * To avoid ambiguity between empty and completely full queues, there - * should always be less than TFD_QUEUE_SIZE_MAX elements in the queue. - * If q->n_window is smaller than TFD_QUEUE_SIZE_MAX, there is no need + * should always be less than max_tfd_queue_size elements in the queue. + * If q->n_window is smaller than max_tfd_queue_size, there is no need * to reserve any queue entries for this purpose. */ - if (q->n_window < TFD_QUEUE_SIZE_MAX) + if (q->n_window < trans->cfg->base_params->max_tfd_queue_size) max = q->n_window; else - max = TFD_QUEUE_SIZE_MAX - 1; + max = trans->cfg->base_params->max_tfd_queue_size - 1; /* - * TFD_QUEUE_SIZE_MAX is a power of 2, so the following is equivalent to - * modulo by TFD_QUEUE_SIZE_MAX and is well defined. + * max_tfd_queue_size is a power of 2, so the following is equivalent to + * modulo by max_tfd_queue_size and is well defined. */ - used = (q->write_ptr - q->read_ptr) & (TFD_QUEUE_SIZE_MAX - 1); + used = (q->write_ptr - q->read_ptr) & + (trans->cfg->base_params->max_tfd_queue_size - 1); if (WARN_ON(used > max)) return 0; @@ -489,7 +490,8 @@ int iwl_pcie_txq_alloc(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num, bool cmd_queue) { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); - size_t tfd_sz = trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX; + size_t tfd_sz = trans_pcie->tfd_size * + trans->cfg->base_params->max_tfd_queue_size; size_t tb0_buf_sz; int i; @@ -555,12 +557,16 @@ int iwl_pcie_txq_init(struct iwl_trans *trans, struct iwl_txq *txq, int slots_num, bool cmd_queue) { int ret; + u32 tfd_queue_max_size = trans->cfg->base_params->max_tfd_queue_size; txq->need_update = false; - /* TFD_QUEUE_SIZE_MAX must be power-of-two size, otherwise + /* max_tfd_queue_size must be power-of-two size, otherwise * iwl_queue_inc_wrap and iwl_queue_dec_wrap are broken. 
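
A standalone sketch of the byte-count table entry packing done in iwl_pcie_gen2_update_byte_tbl() above: the length, optionally converted to dwords, occupies the low 12 bits, and the number of 64-byte fetch chunks minus one sits above it. Only the arithmetic is taken from the patch; the names are invented.

#include <stdbool.h>
#include <stdint.h>

#define TOY_DIV_ROUND_UP(n, d)  (((n) + (d) - 1) / (d))

static uint16_t toy_bc_entry(uint16_t byte_cnt, uint16_t filled_tfd_size,
                             bool bc_table_dword)
{
        uint16_t len = byte_cnt;
        uint16_t num_fetch_chunks;

        if (bc_table_dword)
                len = TOY_DIV_ROUND_UP(len, 4); /* express the length in dwords */

        if (len > 0xFFF)
                return 0;                       /* the driver WARNs in this case */

        num_fetch_chunks = TOY_DIV_ROUND_UP(filled_tfd_size, 64) - 1;

        return len | (num_fetch_chunks << 12);
}
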
*/ - BUILD_BUG_ON(TFD_QUEUE_SIZE_MAX & (TFD_QUEUE_SIZE_MAX - 1)); + if (WARN_ONCE(tfd_queue_max_size & (tfd_queue_max_size - 1), + "Max tfd queue size must be a power of two, but is %d", + tfd_queue_max_size)) + return -EINVAL; /* Initialize queue's high/low-water marks, and head/tail indexes */ ret = iwl_queue_init(txq, slots_num); @@ -637,7 +643,7 @@ static void iwl_pcie_txq_unmap(struct iwl_trans *trans, int txq_id) iwl_pcie_free_tso_page(trans_pcie, skb); } iwl_pcie_txq_free_tfd(trans, txq); - txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr); + txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr); if (txq->read_ptr == txq->write_ptr) { unsigned long flags; @@ -696,7 +702,8 @@ static void iwl_pcie_txq_free(struct iwl_trans *trans, int txq_id) /* De-alloc circular buffer of TFDs */ if (txq->tfds) { dma_free_coherent(dev, - trans_pcie->tfd_size * TFD_QUEUE_SIZE_MAX, + trans_pcie->tfd_size * + trans->cfg->base_params->max_tfd_queue_size, txq->tfds, txq->dma_addr); txq->dma_addr = 0; txq->tfds = NULL; @@ -916,9 +923,11 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans) int ret; int txq_id, slots_num; struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); + u16 bc_tbls_size = trans->cfg->base_params->num_of_queues; - u16 scd_bc_tbls_size = trans->cfg->base_params->num_of_queues * - sizeof(struct iwlagn_scd_bc_tbl); + bc_tbls_size *= (trans->cfg->device_family >= IWL_DEVICE_FAMILY_22560) ? + sizeof(struct iwl_gen3_bc_tbl) : + sizeof(struct iwlagn_scd_bc_tbl); /*It is not allowed to alloc twice, so warn when this happens. * We cannot rely on the previous allocation, so free and fail */ @@ -928,7 +937,7 @@ static int iwl_pcie_tx_alloc(struct iwl_trans *trans) } ret = iwl_pcie_alloc_dma_ptr(trans, &trans_pcie->scd_bc_tbls, - scd_bc_tbls_size); + bc_tbls_size); if (ret) { IWL_ERR(trans, "Scheduler BC Table allocation failed\n"); goto error; @@ -1064,7 +1073,8 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, { struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans); struct iwl_txq *txq = trans_pcie->txq[txq_id]; - int tfd_num = ssn & (TFD_QUEUE_SIZE_MAX - 1); + int tfd_num = iwl_pcie_get_cmd_index(txq, ssn); + int read_ptr = iwl_pcie_get_cmd_index(txq, txq->read_ptr); int last_to_free; /* This function is not meant to release cmd queue*/ @@ -1079,7 +1089,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, goto out; } - if (txq->read_ptr == tfd_num) + if (read_ptr == tfd_num) goto out; IWL_DEBUG_TX_REPLY(trans, "[Q %d] %d -> %d (%d)\n", @@ -1087,12 +1097,13 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, /*Since we free until index _not_ inclusive, the one before index is * the last we will free. 
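
A toy version of the queue-space computation that iwl_queue_space() above now parameterises with the per-device max_tfd_queue_size instead of the fixed TFD_QUEUE_SIZE_MAX. Both sizes are assumed to be powers of two, so the subtract-and-mask handles wrap-around.

#include <stdint.h>

static uint32_t toy_queue_space(uint32_t n_window, uint32_t queue_size,
                                uint32_t write_ptr, uint32_t read_ptr)
{
        uint32_t max, used;

        /* keep one slot free unless the window is already smaller than that */
        max = (n_window < queue_size) ? n_window : queue_size - 1;

        /* queue_size is a power of two, so this is a well-defined modulo */
        used = (write_ptr - read_ptr) & (queue_size - 1);

        return (used > max) ? 0 : max - used;
}
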
This one must be used */ - last_to_free = iwl_queue_dec_wrap(tfd_num); + last_to_free = iwl_queue_dec_wrap(trans, tfd_num); if (!iwl_queue_used(txq, last_to_free)) { IWL_ERR(trans, "%s: Read index for DMA queue txq id (%d), last_to_free %d is out of range [0-%d] %d %d.\n", - __func__, txq_id, last_to_free, TFD_QUEUE_SIZE_MAX, + __func__, txq_id, last_to_free, + trans->cfg->base_params->max_tfd_queue_size, txq->write_ptr, txq->read_ptr); goto out; } @@ -1101,10 +1112,10 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, goto out; for (; - txq->read_ptr != tfd_num; - txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr)) { - int idx = iwl_pcie_get_cmd_index(txq, txq->read_ptr); - struct sk_buff *skb = txq->entries[idx].skb; + read_ptr != tfd_num; + txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr), + read_ptr = iwl_pcie_get_cmd_index(txq, txq->read_ptr)) { + struct sk_buff *skb = txq->entries[read_ptr].skb; if (WARN_ON_ONCE(!skb)) continue; @@ -1113,7 +1124,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, __skb_queue_tail(skbs, skb); - txq->entries[idx].skb = NULL; + txq->entries[read_ptr].skb = NULL; if (!trans->cfg->use_tfh) iwl_pcie_txq_inval_byte_cnt_tbl(trans, txq); @@ -1123,7 +1134,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, iwl_pcie_txq_progress(txq); - if (iwl_queue_space(txq) > txq->low_mark && + if (iwl_queue_space(trans, txq) > txq->low_mark && test_bit(txq_id, trans_pcie->queue_stopped)) { struct sk_buff_head overflow_skbs; @@ -1155,7 +1166,7 @@ void iwl_trans_pcie_reclaim(struct iwl_trans *trans, int txq_id, int ssn, } spin_lock_bh(&txq->lock); - if (iwl_queue_space(txq) > txq->low_mark) + if (iwl_queue_space(trans, txq) > txq->low_mark) iwl_wake_queue(trans, txq); } @@ -1225,23 +1236,30 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx) struct iwl_txq *txq = trans_pcie->txq[txq_id]; unsigned long flags; int nfreed = 0; + u16 r; lockdep_assert_held(&txq->lock); - if ((idx >= TFD_QUEUE_SIZE_MAX) || (!iwl_queue_used(txq, idx))) { + idx = iwl_pcie_get_cmd_index(txq, idx); + r = iwl_pcie_get_cmd_index(txq, txq->read_ptr); + + if (idx >= trans->cfg->base_params->max_tfd_queue_size || + (!iwl_queue_used(txq, idx))) { IWL_ERR(trans, "%s: Read index for DMA queue txq id (%d), index %d is out of range [0-%d] %d %d.\n", - __func__, txq_id, idx, TFD_QUEUE_SIZE_MAX, + __func__, txq_id, idx, + trans->cfg->base_params->max_tfd_queue_size, txq->write_ptr, txq->read_ptr); return; } - for (idx = iwl_queue_inc_wrap(idx); txq->read_ptr != idx; - txq->read_ptr = iwl_queue_inc_wrap(txq->read_ptr)) { + for (idx = iwl_queue_inc_wrap(trans, idx); r != idx; + r = iwl_queue_inc_wrap(trans, r)) { + txq->read_ptr = iwl_queue_inc_wrap(trans, txq->read_ptr); if (nfreed++ > 0) { IWL_ERR(trans, "HCMD skipped: index (%d) %d %d\n", - idx, txq->write_ptr, txq->read_ptr); + idx, txq->write_ptr, r); iwl_force_nmi(trans); } } @@ -1555,7 +1573,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, spin_lock_bh(&txq->lock); - if (iwl_queue_space(txq) < ((cmd->flags & CMD_ASYNC) ? 2 : 1)) { + if (iwl_queue_space(trans, txq) < ((cmd->flags & CMD_ASYNC) ? 
2 : 1)) { spin_unlock_bh(&txq->lock); IWL_ERR(trans, "No space in command queue\n"); @@ -1711,7 +1729,7 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans, } /* Increment and update queue's write index */ - txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr); + txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr); iwl_pcie_txq_inc_wr_ptr(trans, txq); spin_unlock_irqrestore(&trans_pcie->reg_lock, flags); @@ -2311,11 +2329,11 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, spin_lock(&txq->lock); - if (iwl_queue_space(txq) < txq->high_mark) { + if (iwl_queue_space(trans, txq) < txq->high_mark) { iwl_stop_queue(trans, txq); /* don't put the packet on the ring, if there is no room */ - if (unlikely(iwl_queue_space(txq) < 3)) { + if (unlikely(iwl_queue_space(trans, txq) < 3)) { struct iwl_device_cmd **dev_cmd_ptr; dev_cmd_ptr = (void *)((u8 *)skb->cb + @@ -2444,7 +2462,7 @@ int iwl_trans_pcie_tx(struct iwl_trans *trans, struct sk_buff *skb, } /* Tell device the write index *just past* this latest filled TFD */ - txq->write_ptr = iwl_queue_inc_wrap(txq->write_ptr); + txq->write_ptr = iwl_queue_inc_wrap(trans, txq->write_ptr); if (!wait_write_ptr) iwl_pcie_txq_inc_wr_ptr(trans, txq); |
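
A minimal sketch of the wrap-around index helpers that this patch converts from the compile-time TFD_QUEUE_SIZE_MAX mask to the per-transport max_tfd_queue_size. queue_size must be a power of two; the names below are illustrative only.

#include <stdint.h>

static uint32_t toy_inc_wrap(uint32_t index, uint32_t queue_size)
{
        return (index + 1) & (queue_size - 1);
}

static uint32_t toy_dec_wrap(uint32_t index, uint32_t queue_size)
{
        return (index - 1) & (queue_size - 1);
}

/* e.g. toy_inc_wrap(255, 256) == 0 and toy_dec_wrap(0, 256) == 255 */
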