Diffstat (limited to 'drivers/net/wireless/iwlwifi/iwl-agn-lib.c')
-rw-r--r--  drivers/net/wireless/iwlwifi/iwl-agn-lib.c | 774
1 file changed, 213 insertions(+), 561 deletions(-)
diff --git a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
index f803fb62f8bc..3bee0f119bcd 100644
--- a/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
+++ b/drivers/net/wireless/iwlwifi/iwl-agn-lib.c
@@ -39,6 +39,7 @@
#include "iwl-agn-hw.h"
#include "iwl-agn.h"
#include "iwl-sta.h"
+#include "iwl-trans.h"
static inline u32 iwlagn_get_scd_ssn(struct iwlagn_tx_resp *tx_resp)
{
@@ -52,73 +53,73 @@ static void iwlagn_count_tx_err_status(struct iwl_priv *priv, u16 status)
switch (status) {
case TX_STATUS_POSTPONE_DELAY:
- priv->_agn.reply_tx_stats.pp_delay++;
+ priv->reply_tx_stats.pp_delay++;
break;
case TX_STATUS_POSTPONE_FEW_BYTES:
- priv->_agn.reply_tx_stats.pp_few_bytes++;
+ priv->reply_tx_stats.pp_few_bytes++;
break;
case TX_STATUS_POSTPONE_BT_PRIO:
- priv->_agn.reply_tx_stats.pp_bt_prio++;
+ priv->reply_tx_stats.pp_bt_prio++;
break;
case TX_STATUS_POSTPONE_QUIET_PERIOD:
- priv->_agn.reply_tx_stats.pp_quiet_period++;
+ priv->reply_tx_stats.pp_quiet_period++;
break;
case TX_STATUS_POSTPONE_CALC_TTAK:
- priv->_agn.reply_tx_stats.pp_calc_ttak++;
+ priv->reply_tx_stats.pp_calc_ttak++;
break;
case TX_STATUS_FAIL_INTERNAL_CROSSED_RETRY:
- priv->_agn.reply_tx_stats.int_crossed_retry++;
+ priv->reply_tx_stats.int_crossed_retry++;
break;
case TX_STATUS_FAIL_SHORT_LIMIT:
- priv->_agn.reply_tx_stats.short_limit++;
+ priv->reply_tx_stats.short_limit++;
break;
case TX_STATUS_FAIL_LONG_LIMIT:
- priv->_agn.reply_tx_stats.long_limit++;
+ priv->reply_tx_stats.long_limit++;
break;
case TX_STATUS_FAIL_FIFO_UNDERRUN:
- priv->_agn.reply_tx_stats.fifo_underrun++;
+ priv->reply_tx_stats.fifo_underrun++;
break;
case TX_STATUS_FAIL_DRAIN_FLOW:
- priv->_agn.reply_tx_stats.drain_flow++;
+ priv->reply_tx_stats.drain_flow++;
break;
case TX_STATUS_FAIL_RFKILL_FLUSH:
- priv->_agn.reply_tx_stats.rfkill_flush++;
+ priv->reply_tx_stats.rfkill_flush++;
break;
case TX_STATUS_FAIL_LIFE_EXPIRE:
- priv->_agn.reply_tx_stats.life_expire++;
+ priv->reply_tx_stats.life_expire++;
break;
case TX_STATUS_FAIL_DEST_PS:
- priv->_agn.reply_tx_stats.dest_ps++;
+ priv->reply_tx_stats.dest_ps++;
break;
case TX_STATUS_FAIL_HOST_ABORTED:
- priv->_agn.reply_tx_stats.host_abort++;
+ priv->reply_tx_stats.host_abort++;
break;
case TX_STATUS_FAIL_BT_RETRY:
- priv->_agn.reply_tx_stats.bt_retry++;
+ priv->reply_tx_stats.bt_retry++;
break;
case TX_STATUS_FAIL_STA_INVALID:
- priv->_agn.reply_tx_stats.sta_invalid++;
+ priv->reply_tx_stats.sta_invalid++;
break;
case TX_STATUS_FAIL_FRAG_DROPPED:
- priv->_agn.reply_tx_stats.frag_drop++;
+ priv->reply_tx_stats.frag_drop++;
break;
case TX_STATUS_FAIL_TID_DISABLE:
- priv->_agn.reply_tx_stats.tid_disable++;
+ priv->reply_tx_stats.tid_disable++;
break;
case TX_STATUS_FAIL_FIFO_FLUSHED:
- priv->_agn.reply_tx_stats.fifo_flush++;
+ priv->reply_tx_stats.fifo_flush++;
break;
case TX_STATUS_FAIL_INSUFFICIENT_CF_POLL:
- priv->_agn.reply_tx_stats.insuff_cf_poll++;
+ priv->reply_tx_stats.insuff_cf_poll++;
break;
case TX_STATUS_FAIL_PASSIVE_NO_RX:
- priv->_agn.reply_tx_stats.fail_hw_drop++;
+ priv->reply_tx_stats.fail_hw_drop++;
break;
case TX_STATUS_FAIL_NO_BEACON_ON_RADAR:
- priv->_agn.reply_tx_stats.sta_color_mismatch++;
+ priv->reply_tx_stats.sta_color_mismatch++;
break;
default:
- priv->_agn.reply_tx_stats.unknown++;
+ priv->reply_tx_stats.unknown++;
break;
}
}
@@ -129,43 +130,43 @@ static void iwlagn_count_agg_tx_err_status(struct iwl_priv *priv, u16 status)
switch (status) {
case AGG_TX_STATE_UNDERRUN_MSK:
- priv->_agn.reply_agg_tx_stats.underrun++;
+ priv->reply_agg_tx_stats.underrun++;
break;
case AGG_TX_STATE_BT_PRIO_MSK:
- priv->_agn.reply_agg_tx_stats.bt_prio++;
+ priv->reply_agg_tx_stats.bt_prio++;
break;
case AGG_TX_STATE_FEW_BYTES_MSK:
- priv->_agn.reply_agg_tx_stats.few_bytes++;
+ priv->reply_agg_tx_stats.few_bytes++;
break;
case AGG_TX_STATE_ABORT_MSK:
- priv->_agn.reply_agg_tx_stats.abort++;
+ priv->reply_agg_tx_stats.abort++;
break;
case AGG_TX_STATE_LAST_SENT_TTL_MSK:
- priv->_agn.reply_agg_tx_stats.last_sent_ttl++;
+ priv->reply_agg_tx_stats.last_sent_ttl++;
break;
case AGG_TX_STATE_LAST_SENT_TRY_CNT_MSK:
- priv->_agn.reply_agg_tx_stats.last_sent_try++;
+ priv->reply_agg_tx_stats.last_sent_try++;
break;
case AGG_TX_STATE_LAST_SENT_BT_KILL_MSK:
- priv->_agn.reply_agg_tx_stats.last_sent_bt_kill++;
+ priv->reply_agg_tx_stats.last_sent_bt_kill++;
break;
case AGG_TX_STATE_SCD_QUERY_MSK:
- priv->_agn.reply_agg_tx_stats.scd_query++;
+ priv->reply_agg_tx_stats.scd_query++;
break;
case AGG_TX_STATE_TEST_BAD_CRC32_MSK:
- priv->_agn.reply_agg_tx_stats.bad_crc32++;
+ priv->reply_agg_tx_stats.bad_crc32++;
break;
case AGG_TX_STATE_RESPONSE_MSK:
- priv->_agn.reply_agg_tx_stats.response++;
+ priv->reply_agg_tx_stats.response++;
break;
case AGG_TX_STATE_DUMP_TX_MSK:
- priv->_agn.reply_agg_tx_stats.dump_tx++;
+ priv->reply_agg_tx_stats.dump_tx++;
break;
case AGG_TX_STATE_DELAY_TX_MSK:
- priv->_agn.reply_agg_tx_stats.delay_tx++;
+ priv->reply_agg_tx_stats.delay_tx++;
break;
default:
- priv->_agn.reply_agg_tx_stats.unknown++;
+ priv->reply_agg_tx_stats.unknown++;
break;
}
}
@@ -390,8 +391,7 @@ void iwl_check_abort_status(struct iwl_priv *priv,
}
}
-static void iwlagn_rx_reply_tx(struct iwl_priv *priv,
- struct iwl_rx_mem_buffer *rxb)
+void iwlagn_rx_reply_tx(struct iwl_priv *priv, struct iwl_rx_mem_buffer *rxb)
{
struct iwl_rx_packet *pkt = rxb_addr(rxb);
u16 sequence = le16_to_cpu(pkt->hdr.sequence);
@@ -400,6 +400,7 @@ static void iwlagn_rx_reply_tx(struct iwl_priv *priv,
struct iwl_tx_queue *txq = &priv->txq[txq_id];
struct ieee80211_tx_info *info;
struct iwlagn_tx_resp *tx_resp = (void *)&pkt->u.raw[0];
+ struct ieee80211_hdr *hdr;
struct iwl_tx_info *txb;
u32 status = le16_to_cpu(tx_resp->status.status);
int tid;
@@ -408,9 +409,9 @@ static void iwlagn_rx_reply_tx(struct iwl_priv *priv,
unsigned long flags;
if ((index >= txq->q.n_bd) || (iwl_queue_used(&txq->q, index) == 0)) {
- IWL_ERR(priv, "Read index for DMA queue txq_id (%d) index %d "
- "is out of range [0-%d] %d %d\n", txq_id,
- index, txq->q.n_bd, txq->q.write_ptr,
+ IWL_ERR(priv, "%s: Read index for DMA queue txq_id (%d) "
+ "index %d is out of range [0-%d] %d %d\n", __func__,
+ txq_id, index, txq->q.n_bd, txq->q.write_ptr,
txq->q.read_ptr);
return;
}
@@ -426,6 +427,11 @@ static void iwlagn_rx_reply_tx(struct iwl_priv *priv,
IWLAGN_TX_RES_RA_POS;
spin_lock_irqsave(&priv->sta_lock, flags);
+
+ hdr = (void *)txb->skb->data;
+ if (!ieee80211_is_data_qos(hdr->frame_control))
+ priv->last_seq_ctl = tx_resp->seq_ctl;
+
if (txq->sched_retry) {
const u32 scd_ssn = iwlagn_get_scd_ssn(tx_resp);
struct iwl_ht_agg *agg;
@@ -438,7 +444,7 @@ static void iwlagn_rx_reply_tx(struct iwl_priv *priv,
if (tx_resp->bt_kill_count && tx_resp->frame_count == 1 &&
priv->cfg->bt_params &&
priv->cfg->bt_params->advanced_bt_coexist) {
- IWL_WARN(priv, "receive reply tx with bt_kill\n");
+ IWL_DEBUG_COEX(priv, "receive reply tx with bt_kill\n");
}
iwlagn_tx_status_reply_tx(priv, agg, tx_resp, txq_id, index);
@@ -478,27 +484,6 @@ static void iwlagn_rx_reply_tx(struct iwl_priv *priv,
spin_unlock_irqrestore(&priv->sta_lock, flags);
}
-void iwlagn_rx_handler_setup(struct iwl_priv *priv)
-{
- /* init calibration handlers */
- priv->rx_handlers[CALIBRATION_RES_NOTIFICATION] =
- iwlagn_rx_calib_result;
- priv->rx_handlers[REPLY_TX] = iwlagn_rx_reply_tx;
-
- /* set up notification wait support */
- spin_lock_init(&priv->_agn.notif_wait_lock);
- INIT_LIST_HEAD(&priv->_agn.notif_waits);
- init_waitqueue_head(&priv->_agn.notif_waitq);
-}
-
-void iwlagn_setup_deferred_work(struct iwl_priv *priv)
-{
- /*
- * nothing need to be done here anymore
- * still keep for future use if needed
- */
-}
-
int iwlagn_hw_valid_rtc_data_addr(u32 addr)
{
return (addr >= IWLAGN_RTC_DATA_LOWER_BOUND) &&
@@ -540,8 +525,8 @@ int iwlagn_send_tx_power(struct iwl_priv *priv)
else
tx_ant_cfg_cmd = REPLY_TX_POWER_DBM_CMD;
- return iwl_send_cmd_pdu(priv, tx_ant_cfg_cmd, sizeof(tx_power_cmd),
- &tx_power_cmd);
+ return trans_send_cmd_pdu(&priv->trans, tx_ant_cfg_cmd, CMD_SYNC,
+ sizeof(tx_power_cmd), &tx_power_cmd);
}
void iwlagn_temperature(struct iwl_priv *priv)
@@ -610,8 +595,7 @@ static u32 eeprom_indirect_address(const struct iwl_priv *priv, u32 address)
return (address & ADDRESS_MSK) + (offset << 1);
}
-const u8 *iwlagn_eeprom_query_addr(const struct iwl_priv *priv,
- size_t offset)
+const u8 *iwl_eeprom_query_addr(const struct iwl_priv *priv, size_t offset)
{
u32 address = eeprom_indirect_address(priv, offset);
BUG_ON(address >= priv->cfg->base_params->eeprom_size);
@@ -622,367 +606,12 @@ struct iwl_mod_params iwlagn_mod_params = {
.amsdu_size_8K = 1,
.restart_fw = 1,
.plcp_check = true,
+ .bt_coex_active = true,
+ .no_sleep_autoadjust = true,
+ .power_level = IWL_POWER_INDEX_1,
/* the rest are 0 by default */
};
-void iwlagn_rx_queue_reset(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
-{
- unsigned long flags;
- int i;
- spin_lock_irqsave(&rxq->lock, flags);
- INIT_LIST_HEAD(&rxq->rx_free);
- INIT_LIST_HEAD(&rxq->rx_used);
- /* Fill the rx_used queue with _all_ of the Rx buffers */
- for (i = 0; i < RX_FREE_BUFFERS + RX_QUEUE_SIZE; i++) {
- /* In the reset function, these buffers may have been allocated
- * to an SKB, so we need to unmap and free potential storage */
- if (rxq->pool[i].page != NULL) {
- pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
- PAGE_SIZE << priv->hw_params.rx_page_order,
- PCI_DMA_FROMDEVICE);
- __iwl_free_pages(priv, rxq->pool[i].page);
- rxq->pool[i].page = NULL;
- }
- list_add_tail(&rxq->pool[i].list, &rxq->rx_used);
- }
-
- for (i = 0; i < RX_QUEUE_SIZE; i++)
- rxq->queue[i] = NULL;
-
- /* Set us so that we have processed and used all buffers, but have
- * not restocked the Rx queue with fresh buffers */
- rxq->read = rxq->write = 0;
- rxq->write_actual = 0;
- rxq->free_count = 0;
- spin_unlock_irqrestore(&rxq->lock, flags);
-}
-
-int iwlagn_rx_init(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
-{
- u32 rb_size;
- const u32 rfdnlog = RX_QUEUE_SIZE_LOG; /* 256 RBDs */
- u32 rb_timeout = 0; /* FIXME: RX_RB_TIMEOUT for all devices? */
-
- rb_timeout = RX_RB_TIMEOUT;
-
- if (iwlagn_mod_params.amsdu_size_8K)
- rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_8K;
- else
- rb_size = FH_RCSR_RX_CONFIG_REG_VAL_RB_SIZE_4K;
-
- /* Stop Rx DMA */
- iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
-
- /* Reset driver's Rx queue write index */
- iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_WPTR_REG, 0);
-
- /* Tell device where to find RBD circular buffer in DRAM */
- iwl_write_direct32(priv, FH_RSCSR_CHNL0_RBDCB_BASE_REG,
- (u32)(rxq->bd_dma >> 8));
-
- /* Tell device where in DRAM to update its Rx status */
- iwl_write_direct32(priv, FH_RSCSR_CHNL0_STTS_WPTR_REG,
- rxq->rb_stts_dma >> 4);
-
- /* Enable Rx DMA
- * FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY is set because of HW bug in
- * the credit mechanism in 5000 HW RX FIFO
- * Direct rx interrupts to hosts
- * Rx buffer size 4 or 8k
- * RB timeout 0x10
- * 256 RBDs
- */
- iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG,
- FH_RCSR_RX_CONFIG_CHNL_EN_ENABLE_VAL |
- FH_RCSR_CHNL0_RX_IGNORE_RXF_EMPTY |
- FH_RCSR_CHNL0_RX_CONFIG_IRQ_DEST_INT_HOST_VAL |
- FH_RCSR_CHNL0_RX_CONFIG_SINGLE_FRAME_MSK |
- rb_size|
- (rb_timeout << FH_RCSR_RX_CONFIG_REG_IRQ_RBTH_POS)|
- (rfdnlog << FH_RCSR_RX_CONFIG_RBDCB_SIZE_POS));
-
- /* Set interrupt coalescing timer to default (2048 usecs) */
- iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_TIMEOUT_DEF);
-
- return 0;
-}
-
-static void iwlagn_set_pwr_vmain(struct iwl_priv *priv)
-{
-/*
- * (for documentation purposes)
- * to set power to V_AUX, do:
-
- if (pci_pme_capable(priv->pci_dev, PCI_D3cold))
- iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
- APMG_PS_CTRL_VAL_PWR_SRC_VAUX,
- ~APMG_PS_CTRL_MSK_PWR_SRC);
- */
-
- iwl_set_bits_mask_prph(priv, APMG_PS_CTRL_REG,
- APMG_PS_CTRL_VAL_PWR_SRC_VMAIN,
- ~APMG_PS_CTRL_MSK_PWR_SRC);
-}
-
-int iwlagn_hw_nic_init(struct iwl_priv *priv)
-{
- unsigned long flags;
- struct iwl_rx_queue *rxq = &priv->rxq;
- int ret;
-
- /* nic_init */
- spin_lock_irqsave(&priv->lock, flags);
- priv->cfg->ops->lib->apm_ops.init(priv);
-
- /* Set interrupt coalescing calibration timer to default (512 usecs) */
- iwl_write8(priv, CSR_INT_COALESCING, IWL_HOST_INT_CALIB_TIMEOUT_DEF);
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- iwlagn_set_pwr_vmain(priv);
-
- priv->cfg->ops->lib->apm_ops.config(priv);
-
- /* Allocate the RX queue, or reset if it is already allocated */
- if (!rxq->bd) {
- ret = iwl_rx_queue_alloc(priv);
- if (ret) {
- IWL_ERR(priv, "Unable to initialize Rx queue\n");
- return -ENOMEM;
- }
- } else
- iwlagn_rx_queue_reset(priv, rxq);
-
- iwlagn_rx_replenish(priv);
-
- iwlagn_rx_init(priv, rxq);
-
- spin_lock_irqsave(&priv->lock, flags);
-
- rxq->need_update = 1;
- iwl_rx_queue_update_write_ptr(priv, rxq);
-
- spin_unlock_irqrestore(&priv->lock, flags);
-
- /* Allocate or reset and init all Tx and Command queues */
- if (!priv->txq) {
- ret = iwlagn_txq_ctx_alloc(priv);
- if (ret)
- return ret;
- } else
- iwlagn_txq_ctx_reset(priv);
-
- if (priv->cfg->base_params->shadow_reg_enable) {
- /* enable shadow regs in HW */
- iwl_set_bit(priv, CSR_MAC_SHADOW_REG_CTRL,
- 0x800FFFFF);
- }
-
- set_bit(STATUS_INIT, &priv->status);
-
- return 0;
-}
-
-/**
- * iwlagn_dma_addr2rbd_ptr - convert a DMA address to a uCode read buffer ptr
- */
-static inline __le32 iwlagn_dma_addr2rbd_ptr(struct iwl_priv *priv,
- dma_addr_t dma_addr)
-{
- return cpu_to_le32((u32)(dma_addr >> 8));
-}
-
-/**
- * iwlagn_rx_queue_restock - refill RX queue from pre-allocated pool
- *
- * If there are slots in the RX queue that need to be restocked,
- * and we have free pre-allocated buffers, fill the ranks as much
- * as we can, pulling from rx_free.
- *
- * This moves the 'write' index forward to catch up with 'processed', and
- * also updates the memory address in the firmware to reference the new
- * target buffer.
- */
-void iwlagn_rx_queue_restock(struct iwl_priv *priv)
-{
- struct iwl_rx_queue *rxq = &priv->rxq;
- struct list_head *element;
- struct iwl_rx_mem_buffer *rxb;
- unsigned long flags;
-
- spin_lock_irqsave(&rxq->lock, flags);
- while ((iwl_rx_queue_space(rxq) > 0) && (rxq->free_count)) {
- /* The overwritten rxb must be a used one */
- rxb = rxq->queue[rxq->write];
- BUG_ON(rxb && rxb->page);
-
- /* Get next free Rx buffer, remove from free list */
- element = rxq->rx_free.next;
- rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
- list_del(element);
-
- /* Point to Rx buffer via next RBD in circular buffer */
- rxq->bd[rxq->write] = iwlagn_dma_addr2rbd_ptr(priv,
- rxb->page_dma);
- rxq->queue[rxq->write] = rxb;
- rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
- rxq->free_count--;
- }
- spin_unlock_irqrestore(&rxq->lock, flags);
- /* If the pre-allocated buffer pool is dropping low, schedule to
- * refill it */
- if (rxq->free_count <= RX_LOW_WATERMARK)
- queue_work(priv->workqueue, &priv->rx_replenish);
-
-
- /* If we've added more space for the firmware to place data, tell it.
- * Increment device's write pointer in multiples of 8. */
- if (rxq->write_actual != (rxq->write & ~0x7)) {
- spin_lock_irqsave(&rxq->lock, flags);
- rxq->need_update = 1;
- spin_unlock_irqrestore(&rxq->lock, flags);
- iwl_rx_queue_update_write_ptr(priv, rxq);
- }
-}
-
-/**
- * iwlagn_rx_replenish - Move all used packet from rx_used to rx_free
- *
- * When moving to rx_free an SKB is allocated for the slot.
- *
- * Also restock the Rx queue via iwl_rx_queue_restock.
- * This is called as a scheduled work item (except for during initialization)
- */
-void iwlagn_rx_allocate(struct iwl_priv *priv, gfp_t priority)
-{
- struct iwl_rx_queue *rxq = &priv->rxq;
- struct list_head *element;
- struct iwl_rx_mem_buffer *rxb;
- struct page *page;
- unsigned long flags;
- gfp_t gfp_mask = priority;
-
- while (1) {
- spin_lock_irqsave(&rxq->lock, flags);
- if (list_empty(&rxq->rx_used)) {
- spin_unlock_irqrestore(&rxq->lock, flags);
- return;
- }
- spin_unlock_irqrestore(&rxq->lock, flags);
-
- if (rxq->free_count > RX_LOW_WATERMARK)
- gfp_mask |= __GFP_NOWARN;
-
- if (priv->hw_params.rx_page_order > 0)
- gfp_mask |= __GFP_COMP;
-
- /* Alloc a new receive buffer */
- page = alloc_pages(gfp_mask, priv->hw_params.rx_page_order);
- if (!page) {
- if (net_ratelimit())
- IWL_DEBUG_INFO(priv, "alloc_pages failed, "
- "order: %d\n",
- priv->hw_params.rx_page_order);
-
- if ((rxq->free_count <= RX_LOW_WATERMARK) &&
- net_ratelimit())
- IWL_CRIT(priv, "Failed to alloc_pages with %s. Only %u free buffers remaining.\n",
- priority == GFP_ATOMIC ? "GFP_ATOMIC" : "GFP_KERNEL",
- rxq->free_count);
- /* We don't reschedule replenish work here -- we will
- * call the restock method and if it still needs
- * more buffers it will schedule replenish */
- return;
- }
-
- spin_lock_irqsave(&rxq->lock, flags);
-
- if (list_empty(&rxq->rx_used)) {
- spin_unlock_irqrestore(&rxq->lock, flags);
- __free_pages(page, priv->hw_params.rx_page_order);
- return;
- }
- element = rxq->rx_used.next;
- rxb = list_entry(element, struct iwl_rx_mem_buffer, list);
- list_del(element);
-
- spin_unlock_irqrestore(&rxq->lock, flags);
-
- BUG_ON(rxb->page);
- rxb->page = page;
- /* Get physical address of the RB */
- rxb->page_dma = pci_map_page(priv->pci_dev, page, 0,
- PAGE_SIZE << priv->hw_params.rx_page_order,
- PCI_DMA_FROMDEVICE);
- /* dma address must be no more than 36 bits */
- BUG_ON(rxb->page_dma & ~DMA_BIT_MASK(36));
- /* and also 256 byte aligned! */
- BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
-
- spin_lock_irqsave(&rxq->lock, flags);
-
- list_add_tail(&rxb->list, &rxq->rx_free);
- rxq->free_count++;
-
- spin_unlock_irqrestore(&rxq->lock, flags);
- }
-}
-
-void iwlagn_rx_replenish(struct iwl_priv *priv)
-{
- unsigned long flags;
-
- iwlagn_rx_allocate(priv, GFP_KERNEL);
-
- spin_lock_irqsave(&priv->lock, flags);
- iwlagn_rx_queue_restock(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
-}
-
-void iwlagn_rx_replenish_now(struct iwl_priv *priv)
-{
- iwlagn_rx_allocate(priv, GFP_ATOMIC);
-
- iwlagn_rx_queue_restock(priv);
-}
-
-/* Assumes that the skb field of the buffers in 'pool' is kept accurate.
- * If an SKB has been detached, the POOL needs to have its SKB set to NULL
- * This free routine walks the list of POOL entries and if SKB is set to
- * non NULL it is unmapped and freed
- */
-void iwlagn_rx_queue_free(struct iwl_priv *priv, struct iwl_rx_queue *rxq)
-{
- int i;
- for (i = 0; i < RX_QUEUE_SIZE + RX_FREE_BUFFERS; i++) {
- if (rxq->pool[i].page != NULL) {
- pci_unmap_page(priv->pci_dev, rxq->pool[i].page_dma,
- PAGE_SIZE << priv->hw_params.rx_page_order,
- PCI_DMA_FROMDEVICE);
- __iwl_free_pages(priv, rxq->pool[i].page);
- rxq->pool[i].page = NULL;
- }
- }
-
- dma_free_coherent(&priv->pci_dev->dev, 4 * RX_QUEUE_SIZE, rxq->bd,
- rxq->bd_dma);
- dma_free_coherent(&priv->pci_dev->dev, sizeof(struct iwl_rb_status),
- rxq->rb_stts, rxq->rb_stts_dma);
- rxq->bd = NULL;
- rxq->rb_stts = NULL;
-}
-
-int iwlagn_rxq_stop(struct iwl_priv *priv)
-{
-
- /* stop Rx DMA */
- iwl_write_direct32(priv, FH_MEM_RCSR_CHNL0_CONFIG_REG, 0);
- iwl_poll_direct_bit(priv, FH_MEM_RSSR_RX_STATUS_REG,
- FH_RSSR_CHNL0_RX_STATUS_CHNL_IDLE, 1000);
-
- return 0;
-}
-
int iwlagn_hwrate_to_mac80211_idx(u32 rate_n_flags, enum ieee80211_band band)
{
int idx = 0;
@@ -1126,7 +755,7 @@ static int iwl_get_channels_for_scan(struct iwl_priv *priv,
static int iwl_fill_offch_tx(struct iwl_priv *priv, void *data, size_t maxlen)
{
- struct sk_buff *skb = priv->_agn.offchan_tx_skb;
+ struct sk_buff *skb = priv->offchan_tx_skb;
if (skb->len < maxlen)
maxlen = skb->len;
@@ -1141,6 +770,7 @@ int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
struct iwl_host_cmd cmd = {
.id = REPLY_SCAN_CMD,
.len = { sizeof(struct iwl_scan_cmd), },
+ .flags = CMD_SYNC,
};
struct iwl_scan_cmd *scan;
struct iwl_rxon_context *ctx = &priv->contexts[IWL_RXON_CTX_BSS];
@@ -1211,7 +841,7 @@ int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
} else if (priv->scan_type == IWL_SCAN_OFFCH_TX) {
scan->suspend_time = 0;
scan->max_out_time =
- cpu_to_le32(1024 * priv->_agn.offchan_tx_timeout);
+ cpu_to_le32(1024 * priv->offchan_tx_timeout);
}
switch (priv->scan_type) {
@@ -1399,9 +1029,9 @@ int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
scan_ch = (void *)&scan->data[cmd_len];
scan_ch->type = SCAN_CHANNEL_TYPE_ACTIVE;
scan_ch->channel =
- cpu_to_le16(priv->_agn.offchan_tx_chan->hw_value);
+ cpu_to_le16(priv->offchan_tx_chan->hw_value);
scan_ch->active_dwell =
- cpu_to_le16(priv->_agn.offchan_tx_timeout);
+ cpu_to_le16(priv->offchan_tx_timeout);
scan_ch->passive_dwell = 0;
/* Set txpower levels to defaults */
@@ -1411,7 +1041,7 @@ int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
* power level:
* scan_ch->tx_gain = ((1 << 5) | (2 << 3)) | 3;
*/
- if (priv->_agn.offchan_tx_chan->band == IEEE80211_BAND_5GHZ)
+ if (priv->offchan_tx_chan->band == IEEE80211_BAND_5GHZ)
scan_ch->tx_gain = ((1 << 5) | (3 << 3)) | 3;
else
scan_ch->tx_gain = ((1 << 5) | (5 << 3));
@@ -1433,17 +1063,14 @@ int iwlagn_request_scan(struct iwl_priv *priv, struct ieee80211_vif *vif)
/* set scan bit here for PAN params */
set_bit(STATUS_SCAN_HW, &priv->status);
- if (priv->cfg->ops->hcmd->set_pan_params) {
- ret = priv->cfg->ops->hcmd->set_pan_params(priv);
- if (ret)
- return ret;
- }
+ ret = iwlagn_set_pan_params(priv);
+ if (ret)
+ return ret;
- ret = iwl_send_cmd_sync(priv, &cmd);
+ ret = trans_send_cmd(&priv->trans, &cmd);
if (ret) {
clear_bit(STATUS_SCAN_HW, &priv->status);
- if (priv->cfg->ops->hcmd->set_pan_params)
- priv->cfg->ops->hcmd->set_pan_params(priv);
+ iwlagn_set_pan_params(priv);
}
return ret;
@@ -1528,23 +1155,32 @@ int iwlagn_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
might_sleep();
memset(&flush_cmd, 0, sizeof(flush_cmd));
- flush_cmd.fifo_control = IWL_TX_FIFO_VO_MSK | IWL_TX_FIFO_VI_MSK |
- IWL_TX_FIFO_BE_MSK | IWL_TX_FIFO_BK_MSK;
- if (priv->cfg->sku & IWL_SKU_N)
+ if (flush_control & BIT(IWL_RXON_CTX_BSS))
+ flush_cmd.fifo_control = IWL_SCD_VO_MSK | IWL_SCD_VI_MSK |
+ IWL_SCD_BE_MSK | IWL_SCD_BK_MSK |
+ IWL_SCD_MGMT_MSK;
+ if ((flush_control & BIT(IWL_RXON_CTX_PAN)) &&
+ (priv->valid_contexts != BIT(IWL_RXON_CTX_BSS)))
+ flush_cmd.fifo_control |= IWL_PAN_SCD_VO_MSK |
+ IWL_PAN_SCD_VI_MSK | IWL_PAN_SCD_BE_MSK |
+ IWL_PAN_SCD_BK_MSK | IWL_PAN_SCD_MGMT_MSK |
+ IWL_PAN_SCD_MULTICAST_MSK;
+
+ if (priv->cfg->sku & EEPROM_SKU_CAP_11N_ENABLE)
flush_cmd.fifo_control |= IWL_AGG_TX_QUEUE_MSK;
IWL_DEBUG_INFO(priv, "fifo queue control: 0X%x\n",
flush_cmd.fifo_control);
flush_cmd.flush_control = cpu_to_le16(flush_control);
- return iwl_send_cmd(priv, &cmd);
+ return trans_send_cmd(&priv->trans, &cmd);
}
void iwlagn_dev_txfifo_flush(struct iwl_priv *priv, u16 flush_control)
{
mutex_lock(&priv->mutex);
ieee80211_stop_queues(priv->hw);
- if (priv->cfg->ops->lib->txfifo_flush(priv, IWL_DROP_ALL)) {
+ if (iwlagn_txfifo_flush(priv, IWL_DROP_ALL)) {
IWL_ERR(priv, "flush request fail\n");
goto done;
}
@@ -1699,18 +1335,21 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
* (might be in monitor mode), or the interface is in
* IBSS mode (no proper uCode support for coex then).
*/
- if (!bt_coex_active || priv->iw_mode == NL80211_IFTYPE_ADHOC) {
+ if (!iwlagn_mod_params.bt_coex_active ||
+ priv->iw_mode == NL80211_IFTYPE_ADHOC) {
basic.flags = IWLAGN_BT_FLAG_COEX_MODE_DISABLED;
} else {
basic.flags = IWLAGN_BT_FLAG_COEX_MODE_3W <<
IWLAGN_BT_FLAG_COEX_MODE_SHIFT;
- if (priv->cfg->bt_params &&
- priv->cfg->bt_params->bt_sco_disable)
+
+ if (!priv->bt_enable_pspoll)
basic.flags |= IWLAGN_BT_FLAG_SYNC_2_BT_DISABLE;
+ else
+ basic.flags &= ~IWLAGN_BT_FLAG_SYNC_2_BT_DISABLE;
if (priv->bt_ch_announce)
basic.flags |= IWLAGN_BT_FLAG_CHANNEL_INHIBITION;
- IWL_DEBUG_INFO(priv, "BT coex flag: 0X%x\n", basic.flags);
+ IWL_DEBUG_COEX(priv, "BT coex flag: 0X%x\n", basic.flags);
}
priv->bt_enable_flag = basic.flags;
if (priv->bt_full_concurrent)
@@ -1720,7 +1359,7 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
memcpy(basic.bt3_lookup_table, iwlagn_def_3w_lookup,
sizeof(iwlagn_def_3w_lookup));
- IWL_DEBUG_INFO(priv, "BT coex %s in %s mode\n",
+ IWL_DEBUG_COEX(priv, "BT coex %s in %s mode\n",
basic.flags ? "active" : "disabled",
priv->bt_full_concurrent ?
"full concurrency" : "3-wire");
@@ -1728,19 +1367,97 @@ void iwlagn_send_advance_bt_config(struct iwl_priv *priv)
if (priv->cfg->bt_params->bt_session_2) {
memcpy(&bt_cmd_2000.basic, &basic,
sizeof(basic));
- ret = iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG,
- sizeof(bt_cmd_2000), &bt_cmd_2000);
+ ret = trans_send_cmd_pdu(&priv->trans, REPLY_BT_CONFIG,
+ CMD_SYNC, sizeof(bt_cmd_2000), &bt_cmd_2000);
} else {
memcpy(&bt_cmd_6000.basic, &basic,
sizeof(basic));
- ret = iwl_send_cmd_pdu(priv, REPLY_BT_CONFIG,
- sizeof(bt_cmd_6000), &bt_cmd_6000);
+ ret = trans_send_cmd_pdu(&priv->trans, REPLY_BT_CONFIG,
+ CMD_SYNC, sizeof(bt_cmd_6000), &bt_cmd_6000);
}
if (ret)
IWL_ERR(priv, "failed to send BT Coex Config\n");
}
+void iwlagn_bt_adjust_rssi_monitor(struct iwl_priv *priv, bool rssi_ena)
+{
+ struct iwl_rxon_context *ctx, *found_ctx = NULL;
+ bool found_ap = false;
+
+ lockdep_assert_held(&priv->mutex);
+
+ /* Check whether AP or GO mode is active. */
+ if (rssi_ena) {
+ for_each_context(priv, ctx) {
+ if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_AP &&
+ iwl_is_associated_ctx(ctx)) {
+ found_ap = true;
+ break;
+ }
+ }
+ }
+
+ /*
+ * If disable was received or If GO/AP mode, disable RSSI
+ * measurements.
+ */
+ if (!rssi_ena || found_ap) {
+ if (priv->cur_rssi_ctx) {
+ ctx = priv->cur_rssi_ctx;
+ ieee80211_disable_rssi_reports(ctx->vif);
+ priv->cur_rssi_ctx = NULL;
+ }
+ return;
+ }
+
+ /*
+ * If rssi measurements need to be enabled, consider all cases now.
+ * Figure out how many contexts are active.
+ */
+ for_each_context(priv, ctx) {
+ if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION &&
+ iwl_is_associated_ctx(ctx)) {
+ found_ctx = ctx;
+ break;
+ }
+ }
+
+ /*
+ * rssi monitor already enabled for the correct interface...nothing
+ * to do.
+ */
+ if (found_ctx == priv->cur_rssi_ctx)
+ return;
+
+ /*
+ * Figure out if rssi monitor is currently enabled, and needs
+ * to be changed. If rssi monitor is already enabled, disable
+ * it first else just enable rssi measurements on the
+ * interface found above.
+ */
+ if (priv->cur_rssi_ctx) {
+ ctx = priv->cur_rssi_ctx;
+ if (ctx->vif)
+ ieee80211_disable_rssi_reports(ctx->vif);
+ }
+
+ priv->cur_rssi_ctx = found_ctx;
+
+ if (!found_ctx)
+ return;
+
+ ieee80211_enable_rssi_reports(found_ctx->vif,
+ IWLAGN_BT_PSP_MIN_RSSI_THRESHOLD,
+ IWLAGN_BT_PSP_MAX_RSSI_THRESHOLD);
+}
+
+static bool iwlagn_bt_traffic_is_sco(struct iwl_bt_uart_msg *uart_msg)
+{
+ return BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3 >>
+ BT_UART_MSG_FRAME3SCOESCO_POS;
+}
+
static void iwlagn_bt_traffic_change_work(struct work_struct *work)
{
struct iwl_priv *priv =
@@ -1758,7 +1475,7 @@ static void iwlagn_bt_traffic_change_work(struct work_struct *work)
* coex profile notifications. Ignore that since only bad consequence
* can be not matching debug print with actual state.
*/
- IWL_DEBUG_INFO(priv, "BT traffic load changes: %d\n",
+ IWL_DEBUG_COEX(priv, "BT traffic load changes: %d\n",
priv->bt_traffic_load);
switch (priv->bt_traffic_load) {
@@ -1793,23 +1510,43 @@ static void iwlagn_bt_traffic_change_work(struct work_struct *work)
if (test_bit(STATUS_SCAN_HW, &priv->status))
goto out;
- if (priv->cfg->ops->lib->update_chain_flags)
- priv->cfg->ops->lib->update_chain_flags(priv);
+ iwl_update_chain_flags(priv);
if (smps_request != -1) {
+ priv->current_ht_config.smps = smps_request;
for_each_context(priv, ctx) {
if (ctx->vif && ctx->vif->type == NL80211_IFTYPE_STATION)
ieee80211_request_smps(ctx->vif, smps_request);
}
}
+
+ /*
+ * Dynamic PS poll related functionality. Adjust RSSI measurements if
+ * necessary.
+ */
+ iwlagn_bt_coex_rssi_monitor(priv);
out:
mutex_unlock(&priv->mutex);
}
+/*
+ * If BT sco traffic, and RSSI monitor is enabled, move measurements to the
+ * correct interface or disable it if this is the last interface to be
+ * removed.
+ */
+void iwlagn_bt_coex_rssi_monitor(struct iwl_priv *priv)
+{
+ if (priv->bt_is_sco &&
+ priv->bt_traffic_load == IWL_BT_COEX_TRAFFIC_LOAD_CONTINUOUS)
+ iwlagn_bt_adjust_rssi_monitor(priv, true);
+ else
+ iwlagn_bt_adjust_rssi_monitor(priv, false);
+}
+
static void iwlagn_print_uartmsg(struct iwl_priv *priv,
struct iwl_bt_uart_msg *uart_msg)
{
- IWL_DEBUG_NOTIF(priv, "Message Type = 0x%X, SSN = 0x%X, "
+ IWL_DEBUG_COEX(priv, "Message Type = 0x%X, SSN = 0x%X, "
"Update Req = 0x%X",
(BT_UART_MSG_FRAME1MSGTYPE_MSK & uart_msg->frame1) >>
BT_UART_MSG_FRAME1MSGTYPE_POS,
@@ -1818,7 +1555,7 @@ static void iwlagn_print_uartmsg(struct iwl_priv *priv,
(BT_UART_MSG_FRAME1UPDATEREQ_MSK & uart_msg->frame1) >>
BT_UART_MSG_FRAME1UPDATEREQ_POS);
- IWL_DEBUG_NOTIF(priv, "Open connections = 0x%X, Traffic load = 0x%X, "
+ IWL_DEBUG_COEX(priv, "Open connections = 0x%X, Traffic load = 0x%X, "
"Chl_SeqN = 0x%X, In band = 0x%X",
(BT_UART_MSG_FRAME2OPENCONNECTIONS_MSK & uart_msg->frame2) >>
BT_UART_MSG_FRAME2OPENCONNECTIONS_POS,
@@ -1829,7 +1566,7 @@ static void iwlagn_print_uartmsg(struct iwl_priv *priv,
(BT_UART_MSG_FRAME2INBAND_MSK & uart_msg->frame2) >>
BT_UART_MSG_FRAME2INBAND_POS);
- IWL_DEBUG_NOTIF(priv, "SCO/eSCO = 0x%X, Sniff = 0x%X, A2DP = 0x%X, "
+ IWL_DEBUG_COEX(priv, "SCO/eSCO = 0x%X, Sniff = 0x%X, A2DP = 0x%X, "
"ACL = 0x%X, Master = 0x%X, OBEX = 0x%X",
(BT_UART_MSG_FRAME3SCOESCO_MSK & uart_msg->frame3) >>
BT_UART_MSG_FRAME3SCOESCO_POS,
@@ -1844,11 +1581,11 @@ static void iwlagn_print_uartmsg(struct iwl_priv *priv,
(BT_UART_MSG_FRAME3OBEX_MSK & uart_msg->frame3) >>
BT_UART_MSG_FRAME3OBEX_POS);
- IWL_DEBUG_NOTIF(priv, "Idle duration = 0x%X",
+ IWL_DEBUG_COEX(priv, "Idle duration = 0x%X",
(BT_UART_MSG_FRAME4IDLEDURATION_MSK & uart_msg->frame4) >>
BT_UART_MSG_FRAME4IDLEDURATION_POS);
- IWL_DEBUG_NOTIF(priv, "Tx Activity = 0x%X, Rx Activity = 0x%X, "
+ IWL_DEBUG_COEX(priv, "Tx Activity = 0x%X, Rx Activity = 0x%X, "
"eSCO Retransmissions = 0x%X",
(BT_UART_MSG_FRAME5TXACTIVITY_MSK & uart_msg->frame5) >>
BT_UART_MSG_FRAME5TXACTIVITY_POS,
@@ -1857,13 +1594,13 @@ static void iwlagn_print_uartmsg(struct iwl_priv *priv,
(BT_UART_MSG_FRAME5ESCORETRANSMIT_MSK & uart_msg->frame5) >>
BT_UART_MSG_FRAME5ESCORETRANSMIT_POS);
- IWL_DEBUG_NOTIF(priv, "Sniff Interval = 0x%X, Discoverable = 0x%X",
+ IWL_DEBUG_COEX(priv, "Sniff Interval = 0x%X, Discoverable = 0x%X",
(BT_UART_MSG_FRAME6SNIFFINTERVAL_MSK & uart_msg->frame6) >>
BT_UART_MSG_FRAME6SNIFFINTERVAL_POS,
(BT_UART_MSG_FRAME6DISCOVERABLE_MSK & uart_msg->frame6) >>
BT_UART_MSG_FRAME6DISCOVERABLE_POS);
- IWL_DEBUG_NOTIF(priv, "Sniff Activity = 0x%X, Page = "
+ IWL_DEBUG_COEX(priv, "Sniff Activity = 0x%X, Page = "
"0x%X, Inquiry = 0x%X, Connectable = 0x%X",
(BT_UART_MSG_FRAME7SNIFFACTIVITY_MSK & uart_msg->frame7) >>
BT_UART_MSG_FRAME7SNIFFACTIVITY_POS,
@@ -1913,14 +1650,16 @@ void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
return;
}
- IWL_DEBUG_NOTIF(priv, "BT Coex notification:\n");
- IWL_DEBUG_NOTIF(priv, " status: %d\n", coex->bt_status);
- IWL_DEBUG_NOTIF(priv, " traffic load: %d\n", coex->bt_traffic_load);
- IWL_DEBUG_NOTIF(priv, " CI compliance: %d\n",
+ IWL_DEBUG_COEX(priv, "BT Coex notification:\n");
+ IWL_DEBUG_COEX(priv, " status: %d\n", coex->bt_status);
+ IWL_DEBUG_COEX(priv, " traffic load: %d\n", coex->bt_traffic_load);
+ IWL_DEBUG_COEX(priv, " CI compliance: %d\n",
coex->bt_ci_compliance);
iwlagn_print_uartmsg(priv, uart_msg);
priv->last_bt_traffic_load = priv->bt_traffic_load;
+ priv->bt_is_sco = iwlagn_bt_traffic_is_sco(uart_msg);
+
if (priv->iw_mode != NL80211_IFTYPE_ADHOC) {
if (priv->bt_status != coex->bt_status ||
priv->last_bt_traffic_load != coex->bt_traffic_load) {
@@ -1954,15 +1693,12 @@ void iwlagn_bt_coex_profile_notif(struct iwl_priv *priv,
void iwlagn_bt_rx_handler_setup(struct iwl_priv *priv)
{
- iwlagn_rx_handler_setup(priv);
priv->rx_handlers[REPLY_BT_COEX_PROFILE_NOTIF] =
iwlagn_bt_coex_profile_notif;
}
void iwlagn_bt_setup_deferred_work(struct iwl_priv *priv)
{
- iwlagn_setup_deferred_work(priv);
-
INIT_WORK(&priv->bt_traffic_change_work,
iwlagn_bt_traffic_change_work);
}
@@ -2274,9 +2010,9 @@ void iwlagn_init_notification_wait(struct iwl_priv *priv,
wait_entry->triggered = false;
wait_entry->aborted = false;
- spin_lock_bh(&priv->_agn.notif_wait_lock);
- list_add(&wait_entry->list, &priv->_agn.notif_waits);
- spin_unlock_bh(&priv->_agn.notif_wait_lock);
+ spin_lock_bh(&priv->notif_wait_lock);
+ list_add(&wait_entry->list, &priv->notif_waits);
+ spin_unlock_bh(&priv->notif_wait_lock);
}
int iwlagn_wait_notification(struct iwl_priv *priv,
@@ -2285,13 +2021,13 @@ int iwlagn_wait_notification(struct iwl_priv *priv,
{
int ret;
- ret = wait_event_timeout(priv->_agn.notif_waitq,
+ ret = wait_event_timeout(priv->notif_waitq,
wait_entry->triggered || wait_entry->aborted,
timeout);
- spin_lock_bh(&priv->_agn.notif_wait_lock);
+ spin_lock_bh(&priv->notif_wait_lock);
list_del(&wait_entry->list);
- spin_unlock_bh(&priv->_agn.notif_wait_lock);
+ spin_unlock_bh(&priv->notif_wait_lock);
if (wait_entry->aborted)
return -EIO;
@@ -2305,91 +2041,7 @@ int iwlagn_wait_notification(struct iwl_priv *priv,
void iwlagn_remove_notification(struct iwl_priv *priv,
struct iwl_notification_wait *wait_entry)
{
- spin_lock_bh(&priv->_agn.notif_wait_lock);
+ spin_lock_bh(&priv->notif_wait_lock);
list_del(&wait_entry->list);
- spin_unlock_bh(&priv->_agn.notif_wait_lock);
-}
-
-int iwlagn_start_device(struct iwl_priv *priv)
-{
- int ret;
-
- if (iwl_prepare_card_hw(priv)) {
- IWL_WARN(priv, "Exit HW not ready\n");
- return -EIO;
- }
-
- /* If platform's RF_KILL switch is NOT set to KILL */
- if (iwl_read32(priv, CSR_GP_CNTRL) & CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW)
- clear_bit(STATUS_RF_KILL_HW, &priv->status);
- else
- set_bit(STATUS_RF_KILL_HW, &priv->status);
-
- if (iwl_is_rfkill(priv)) {
- wiphy_rfkill_set_hw_state(priv->hw->wiphy, true);
- iwl_enable_interrupts(priv);
- return -ERFKILL;
- }
-
- iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
-
- ret = iwlagn_hw_nic_init(priv);
- if (ret) {
- IWL_ERR(priv, "Unable to init nic\n");
- return ret;
- }
-
- /* make sure rfkill handshake bits are cleared */
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR,
- CSR_UCODE_DRV_GP1_BIT_CMD_BLOCKED);
-
- /* clear (again), then enable host interrupts */
- iwl_write32(priv, CSR_INT, 0xFFFFFFFF);
- iwl_enable_interrupts(priv);
-
- /* really make sure rfkill handshake bits are cleared */
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
- iwl_write32(priv, CSR_UCODE_DRV_GP1_CLR, CSR_UCODE_SW_BIT_RFKILL);
-
- return 0;
-}
-
-void iwlagn_stop_device(struct iwl_priv *priv)
-{
- unsigned long flags;
-
- /* stop and reset the on-board processor */
- iwl_write32(priv, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
-
- /* tell the device to stop sending interrupts */
- spin_lock_irqsave(&priv->lock, flags);
- iwl_disable_interrupts(priv);
- spin_unlock_irqrestore(&priv->lock, flags);
- iwl_synchronize_irq(priv);
-
- /* device going down, Stop using ICT table */
- iwl_disable_ict(priv);
-
- /*
- * If a HW restart happens during firmware loading,
- * then the firmware loading might call this function
- * and later it might be called again due to the
- * restart. So don't process again if the device is
- * already dead.
- */
- if (test_bit(STATUS_DEVICE_ENABLED, &priv->status)) {
- iwlagn_txq_ctx_stop(priv);
- iwlagn_rxq_stop(priv);
-
- /* Power-down device's busmaster DMA clocks */
- iwl_write_prph(priv, APMG_CLK_DIS_REG, APMG_CLK_VAL_DMA_CLK_RQT);
- udelay(5);
- }
-
- /* Make sure (redundant) we've released our request to stay awake */
- iwl_clear_bit(priv, CSR_GP_CNTRL, CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
-
- /* Stop the device, and put it in low power state */
- iwl_apm_stop(priv);
+ spin_unlock_bh(&priv->notif_wait_lock);
}