author     John W. Linville <linville@tuxdriver.com>   2014-01-03 15:34:41 -0500
committer  John W. Linville <linville@tuxdriver.com>   2014-01-03 15:34:41 -0500
commit     c3c5bb31ea046721bc5fe3e54b45c7a13677f398 (patch)
tree       e924df7c6d1834e2fd5079d5c424c0a3f070f490 /drivers/net/wireless/iwlwifi/pcie
parent     55d1cad2efe2e3567c3116f50ba10e5af1bc01fc (diff)
parent     14648d6534477952633ee3ecadb31cf227414f13 (diff)
Merge branch 'for-john' of git://git.kernel.org/pub/scm/linux/kernel/git/iwlwifi/iwlwifi-next
Diffstat (limited to 'drivers/net/wireless/iwlwifi/pcie')
-rw-r--r--   drivers/net/wireless/iwlwifi/pcie/drv.c         4
-rw-r--r--   drivers/net/wireless/iwlwifi/pcie/internal.h   40
-rw-r--r--   drivers/net/wireless/iwlwifi/pcie/rx.c        400
-rw-r--r--   drivers/net/wireless/iwlwifi/pcie/trans.c      79
-rw-r--r--   drivers/net/wireless/iwlwifi/pcie/tx.c        106
5 files changed, 310 insertions, 319 deletions
diff --git a/drivers/net/wireless/iwlwifi/pcie/drv.c b/drivers/net/wireless/iwlwifi/pcie/drv.c
index 2e97a3995333..e58b8af56c04 100644
--- a/drivers/net/wireless/iwlwifi/pcie/drv.c
+++ b/drivers/net/wireless/iwlwifi/pcie/drv.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
diff --git a/drivers/net/wireless/iwlwifi/pcie/internal.h b/drivers/net/wireless/iwlwifi/pcie/internal.h
index 674c75b0d002..e851f26fd44c 100644
--- a/drivers/net/wireless/iwlwifi/pcie/internal.h
+++ b/drivers/net/wireless/iwlwifi/pcie/internal.h
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -262,6 +262,7 @@ iwl_pcie_get_scratchbuf_dma(struct iwl_txq *txq, int idx)
* @rx_page_order: page order for receive buffer size
* @wd_timeout: queue watchdog timeout (jiffies)
* @reg_lock: protect hw register access
+ * @cmd_in_flight: true when we have a host command in flight
*/
struct iwl_trans_pcie {
struct iwl_rxq rxq;
@@ -273,7 +274,6 @@ struct iwl_trans_pcie {
__le32 *ict_tbl;
dma_addr_t ict_tbl_dma;
int ict_index;
- u32 inta;
bool use_ict;
struct isr_statistics isr_stats;
@@ -311,6 +311,7 @@ struct iwl_trans_pcie {
/*protect hw register */
spinlock_t reg_lock;
+ bool cmd_in_flight;
};
#define IWL_TRANS_GET_PCIE_TRANS(_iwl_trans) \
@@ -343,7 +344,7 @@ void iwl_pcie_rx_free(struct iwl_trans *trans);
/*****************************************************
* ICT - interrupt handling
******************************************************/
-irqreturn_t iwl_pcie_isr_ict(int irq, void *data);
+irqreturn_t iwl_pcie_isr(int irq, void *data);
int iwl_pcie_alloc_ict(struct iwl_trans *trans);
void iwl_pcie_free_ict(struct iwl_trans *trans);
void iwl_pcie_reset_ict(struct iwl_trans *trans);
@@ -397,13 +398,17 @@ static inline void iwl_enable_interrupts(struct iwl_trans *trans)
IWL_DEBUG_ISR(trans, "Enabling interrupts\n");
set_bit(STATUS_INT_ENABLED, &trans->status);
+ trans_pcie->inta_mask = CSR_INI_SET_MASK;
iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
}
static inline void iwl_enable_rfkill_int(struct iwl_trans *trans)
{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+
IWL_DEBUG_ISR(trans, "Enabling rfkill interrupt\n");
- iwl_write32(trans, CSR_INT_MASK, CSR_INT_BIT_RF_KILL);
+ trans_pcie->inta_mask = CSR_INT_BIT_RF_KILL;
+ iwl_write32(trans, CSR_INT_MASK, trans_pcie->inta_mask);
}
static inline void iwl_wake_queue(struct iwl_trans *trans,
@@ -456,4 +461,31 @@ static inline bool iwl_is_rfkill_set(struct iwl_trans *trans)
CSR_GP_CNTRL_REG_FLAG_HW_RF_KILL_SW);
}
+static inline void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans,
+ u32 reg, u32 mask, u32 value)
+{
+ u32 v;
+
+#ifdef CONFIG_IWLWIFI_DEBUG
+ WARN_ON_ONCE(value & ~mask);
+#endif
+
+ v = iwl_read32(trans, reg);
+ v &= ~mask;
+ v |= value;
+ iwl_write32(trans, reg, v);
+}
+
+static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans,
+ u32 reg, u32 mask)
+{
+ __iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
+}
+
+static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
+ u32 reg, u32 mask)
+{
+ __iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
+}
+
#endif /* __iwl_trans_int_pcie_h__ */
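
For reference, the __iwl_trans_pcie_set_bits_mask() helpers this patch moves into internal.h are a plain read-modify-write on a 32-bit register. Below is a minimal stand-alone C sketch of the same pattern; fake_reg, reg_read() and reg_write() are invented stand-ins for real MMIO and iwl_read32()/iwl_write32().

#include <stdint.h>

static uint32_t fake_reg;                     /* stand-in for a device register */

static uint32_t reg_read(void)        { return fake_reg; }
static void     reg_write(uint32_t v) { fake_reg = v; }

/* Clear every bit covered by mask, then set the requested subset. */
static void set_bits_mask(uint32_t mask, uint32_t value)
{
	uint32_t v = reg_read();
	v &= ~mask;
	v |= value;
	reg_write(v);
}

int main(void)
{
	fake_reg = 0xF0;
	set_bits_mask(0x0F, 0x05);     /* clear the low nibble, set bits 0 and 2 */
	return fake_reg == 0xF5 ? 0 : 1;
}

The set-bit and clear-bit wrappers in the header are then just this call with value == mask or value == 0.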
diff --git a/drivers/net/wireless/iwlwifi/pcie/rx.c b/drivers/net/wireless/iwlwifi/pcie/rx.c
index 7aeec5ccefa5..1890ea29c264 100644
--- a/drivers/net/wireless/iwlwifi/pcie/rx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/rx.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -148,10 +148,9 @@ int iwl_pcie_rx_stop(struct iwl_trans *trans)
static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
struct iwl_rxq *rxq)
{
- unsigned long flags;
u32 reg;
- spin_lock_irqsave(&rxq->lock, flags);
+ spin_lock(&rxq->lock);
if (rxq->need_update == 0)
goto exit_unlock;
@@ -190,7 +189,7 @@ static void iwl_pcie_rxq_inc_wr_ptr(struct iwl_trans *trans,
rxq->need_update = 0;
exit_unlock:
- spin_unlock_irqrestore(&rxq->lock, flags);
+ spin_unlock(&rxq->lock);
}
/*
@@ -209,7 +208,6 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;
struct iwl_rx_mem_buffer *rxb;
- unsigned long flags;
/*
* If the device isn't enabled - not need to try to add buffers...
@@ -222,7 +220,7 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
if (!test_bit(STATUS_DEVICE_ENABLED, &trans->status))
return;
- spin_lock_irqsave(&rxq->lock, flags);
+ spin_lock(&rxq->lock);
while ((iwl_rxq_space(rxq) > 0) && (rxq->free_count)) {
/* The overwritten rxb must be a used one */
rxb = rxq->queue[rxq->write];
@@ -239,7 +237,7 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
rxq->write = (rxq->write + 1) & RX_QUEUE_MASK;
rxq->free_count--;
}
- spin_unlock_irqrestore(&rxq->lock, flags);
+ spin_unlock(&rxq->lock);
/* If the pre-allocated buffer pool is dropping low, schedule to
* refill it */
if (rxq->free_count <= RX_LOW_WATERMARK)
@@ -248,9 +246,9 @@ static void iwl_pcie_rxq_restock(struct iwl_trans *trans)
/* If we've added more space for the firmware to place data, tell it.
* Increment device's write pointer in multiples of 8. */
if (rxq->write_actual != (rxq->write & ~0x7)) {
- spin_lock_irqsave(&rxq->lock, flags);
+ spin_lock(&rxq->lock);
rxq->need_update = 1;
- spin_unlock_irqrestore(&rxq->lock, flags);
+ spin_unlock(&rxq->lock);
iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
}
}
@@ -270,16 +268,15 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
struct iwl_rxq *rxq = &trans_pcie->rxq;
struct iwl_rx_mem_buffer *rxb;
struct page *page;
- unsigned long flags;
gfp_t gfp_mask = priority;
while (1) {
- spin_lock_irqsave(&rxq->lock, flags);
+ spin_lock(&rxq->lock);
if (list_empty(&rxq->rx_used)) {
- spin_unlock_irqrestore(&rxq->lock, flags);
+ spin_unlock(&rxq->lock);
return;
}
- spin_unlock_irqrestore(&rxq->lock, flags);
+ spin_unlock(&rxq->lock);
if (rxq->free_count > RX_LOW_WATERMARK)
gfp_mask |= __GFP_NOWARN;
@@ -308,17 +305,17 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
return;
}
- spin_lock_irqsave(&rxq->lock, flags);
+ spin_lock(&rxq->lock);
if (list_empty(&rxq->rx_used)) {
- spin_unlock_irqrestore(&rxq->lock, flags);
+ spin_unlock(&rxq->lock);
__free_pages(page, trans_pcie->rx_page_order);
return;
}
rxb = list_first_entry(&rxq->rx_used, struct iwl_rx_mem_buffer,
list);
list_del(&rxb->list);
- spin_unlock_irqrestore(&rxq->lock, flags);
+ spin_unlock(&rxq->lock);
BUG_ON(rxb->page);
rxb->page = page;
@@ -329,9 +326,9 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
DMA_FROM_DEVICE);
if (dma_mapping_error(trans->dev, rxb->page_dma)) {
rxb->page = NULL;
- spin_lock_irqsave(&rxq->lock, flags);
+ spin_lock(&rxq->lock);
list_add(&rxb->list, &rxq->rx_used);
- spin_unlock_irqrestore(&rxq->lock, flags);
+ spin_unlock(&rxq->lock);
__free_pages(page, trans_pcie->rx_page_order);
return;
}
@@ -340,12 +337,12 @@ static void iwl_pcie_rxq_alloc_rbs(struct iwl_trans *trans, gfp_t priority)
/* and also 256 byte aligned! */
BUG_ON(rxb->page_dma & DMA_BIT_MASK(8));
- spin_lock_irqsave(&rxq->lock, flags);
+ spin_lock(&rxq->lock);
list_add_tail(&rxb->list, &rxq->rx_free);
rxq->free_count++;
- spin_unlock_irqrestore(&rxq->lock, flags);
+ spin_unlock(&rxq->lock);
}
}
@@ -379,13 +376,12 @@ static void iwl_pcie_rxq_free_rbs(struct iwl_trans *trans)
static void iwl_pcie_rx_replenish(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- unsigned long flags;
iwl_pcie_rxq_alloc_rbs(trans, GFP_KERNEL);
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ spin_lock(&trans_pcie->irq_lock);
iwl_pcie_rxq_restock(trans);
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+ spin_unlock(&trans_pcie->irq_lock);
}
static void iwl_pcie_rx_replenish_now(struct iwl_trans *trans)
@@ -511,7 +507,6 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;
int i, err;
- unsigned long flags;
if (!rxq->bd) {
err = iwl_pcie_rx_alloc(trans);
@@ -519,7 +514,7 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
return err;
}
- spin_lock_irqsave(&rxq->lock, flags);
+ spin_lock(&rxq->lock);
INIT_WORK(&trans_pcie->rx_replenish, iwl_pcie_rx_replenish_work);
@@ -535,16 +530,16 @@ int iwl_pcie_rx_init(struct iwl_trans *trans)
rxq->read = rxq->write = 0;
rxq->write_actual = 0;
memset(rxq->rb_stts, 0, sizeof(*rxq->rb_stts));
- spin_unlock_irqrestore(&rxq->lock, flags);
+ spin_unlock(&rxq->lock);
iwl_pcie_rx_replenish(trans);
iwl_pcie_rx_hw_init(trans, rxq);
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ spin_lock(&trans_pcie->irq_lock);
rxq->need_update = 1;
iwl_pcie_rxq_inc_wr_ptr(trans, rxq);
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+ spin_unlock(&trans_pcie->irq_lock);
return 0;
}
@@ -553,7 +548,6 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;
- unsigned long flags;
/*if rxq->bd is NULL, it means that nothing has been allocated,
* exit now */
@@ -564,9 +558,9 @@ void iwl_pcie_rx_free(struct iwl_trans *trans)
cancel_work_sync(&trans_pcie->rx_replenish);
- spin_lock_irqsave(&rxq->lock, flags);
+ spin_lock(&rxq->lock);
iwl_pcie_rxq_free_rbs(trans);
- spin_unlock_irqrestore(&rxq->lock, flags);
+ spin_unlock(&rxq->lock);
dma_free_coherent(trans->dev, sizeof(__le32) * RX_QUEUE_SIZE,
rxq->bd, rxq->bd_dma);
@@ -589,7 +583,6 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_rxq *rxq = &trans_pcie->rxq;
struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
- unsigned long flags;
bool page_stolen = false;
int max_len = PAGE_SIZE << trans_pcie->rx_page_order;
u32 offset = 0;
@@ -691,7 +684,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
/* Reuse the page if possible. For notification packets and
* SKBs that fail to Rx correctly, add them back into the
* rx_free list for reuse later. */
- spin_lock_irqsave(&rxq->lock, flags);
+ spin_lock(&rxq->lock);
if (rxb->page != NULL) {
rxb->page_dma =
dma_map_page(trans->dev, rxb->page, 0,
@@ -712,7 +705,7 @@ static void iwl_pcie_rx_handle_rb(struct iwl_trans *trans,
}
} else
list_add_tail(&rxb->list, &rxq->rx_used);
- spin_unlock_irqrestore(&rxq->lock, flags);
+ spin_unlock(&rxq->lock);
}
/*
@@ -807,6 +800,87 @@ static void iwl_pcie_irq_handle_error(struct iwl_trans *trans)
wake_up(&trans_pcie->wait_command_queue);
}
+static u32 iwl_pcie_int_cause_non_ict(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ u32 inta;
+
+ lockdep_assert_held(&trans_pcie->irq_lock);
+
+ trace_iwlwifi_dev_irq(trans->dev);
+
+ /* Discover which interrupts are active/pending */
+ inta = iwl_read32(trans, CSR_INT);
+
+ /* the thread will service interrupts and re-enable them */
+ return inta;
+}
+
+/* a device (PCI-E) page is 4096 bytes long */
+#define ICT_SHIFT 12
+#define ICT_SIZE (1 << ICT_SHIFT)
+#define ICT_COUNT (ICT_SIZE / sizeof(u32))
+
+/* interrupt handler using ict table, with this interrupt driver will
+ * stop using INTA register to get device's interrupt, reading this register
+ * is expensive, device will write interrupts in ICT dram table, increment
+ * index then will fire interrupt to driver, driver will OR all ICT table
+ * entries from current index up to table entry with 0 value. the result is
+ * the interrupt we need to service, driver will set the entries back to 0 and
+ * set index.
+ */
+static u32 iwl_pcie_int_cause_ict(struct iwl_trans *trans)
+{
+ struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
+ u32 inta;
+ u32 val = 0;
+ u32 read;
+
+ trace_iwlwifi_dev_irq(trans->dev);
+
+ /* Ignore interrupt if there's nothing in NIC to service.
+ * This may be due to IRQ shared with another device,
+ * or due to sporadic interrupts thrown from our NIC. */
+ read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
+ trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
+ if (!read)
+ return 0;
+
+ /*
+ * Collect all entries up to the first 0, starting from ict_index;
+ * note we already read at ict_index.
+ */
+ do {
+ val |= read;
+ IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
+ trans_pcie->ict_index, read);
+ trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
+ trans_pcie->ict_index =
+ iwl_queue_inc_wrap(trans_pcie->ict_index, ICT_COUNT);
+
+ read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
+ trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
+ read);
+ } while (read);
+
+ /* We should not get this value, just ignore it. */
+ if (val == 0xffffffff)
+ val = 0;
+
+ /*
+ * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
+ * (bit 15 before shifting it to 31) to clear when using interrupt
+ * coalescing. fortunately, bits 18 and 19 stay set when this happens
+ * so we use them to decide on the real state of the Rx bit.
+ * In other words, bit 15 is set if bit 18 or bit 19 are set.
+ */
+ if (val & 0xC0000)
+ val |= 0x8000;
+
+ inta = (0xff & val) | ((0xff00 & val) << 16);
+ return inta;
+}
+
irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
{
struct iwl_trans *trans = dev_id;
@@ -814,12 +888,61 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
struct isr_statistics *isr_stats = &trans_pcie->isr_stats;
u32 inta = 0;
u32 handled = 0;
- unsigned long flags;
u32 i;
lock_map_acquire(&trans->sync_cmd_lockdep_map);
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ spin_lock(&trans_pcie->irq_lock);
+
+ /* dram interrupt table not set yet,
+ * use legacy interrupt.
+ */
+ if (likely(trans_pcie->use_ict))
+ inta = iwl_pcie_int_cause_ict(trans);
+ else
+ inta = iwl_pcie_int_cause_non_ict(trans);
+
+ if (iwl_have_debug_level(IWL_DL_ISR)) {
+ IWL_DEBUG_ISR(trans,
+ "ISR inta 0x%08x, enabled 0x%08x(sw), enabled(hw) 0x%08x, fh 0x%08x\n",
+ inta, trans_pcie->inta_mask,
+ iwl_read32(trans, CSR_INT_MASK),
+ iwl_read32(trans, CSR_FH_INT_STATUS));
+ if (inta & (~trans_pcie->inta_mask))
+ IWL_DEBUG_ISR(trans,
+ "We got a masked interrupt (0x%08x)\n",
+ inta & (~trans_pcie->inta_mask));
+ }
+
+ inta &= trans_pcie->inta_mask;
+
+ /*
+ * Ignore interrupt if there's nothing in NIC to service.
+ * This may be due to IRQ shared with another device,
+ * or due to sporadic interrupts thrown from our NIC.
+ */
+ if (unlikely(!inta)) {
+ IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
+ /*
+ * Re-enable interrupts here since we don't
+ * have anything to service
+ */
+ if (test_bit(STATUS_INT_ENABLED, &trans->status))
+ iwl_enable_interrupts(trans);
+ spin_unlock(&trans_pcie->irq_lock);
+ lock_map_release(&trans->sync_cmd_lockdep_map);
+ return IRQ_NONE;
+ }
+
+ if (unlikely(inta == 0xFFFFFFFF || (inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
+ /*
+ * Hardware disappeared. It might have
+ * already raised an interrupt.
+ */
+ IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
+ spin_unlock(&trans_pcie->irq_lock);
+ goto out;
+ }
/* Ack/clear/reset pending uCode interrupts.
* Note: Some bits in CSR_INT are "OR" of bits in CSR_FH_INT_STATUS,
@@ -832,19 +955,13 @@ irqreturn_t iwl_pcie_irq_handler(int irq, void *dev_id)
* hardware bugs here by ACKing all the possible interrupts so that
* interrupt coalescing can still be achieved.
*/
- iwl_write32(trans, CSR_INT,
- trans_pcie->inta | ~trans_pcie->inta_mask);
-
- inta = trans_pcie->inta;
+ iwl_write32(trans, CSR_INT, inta | ~trans_pcie->inta_mask);
if (iwl_have_debug_level(IWL_DL_ISR))
IWL_DEBUG_ISR(trans, "inta 0x%08x, enabled 0x%08x\n",
inta, iwl_read32(trans, CSR_INT_MASK));
- /* saved interrupt in inta variable now we can reset trans_pcie->inta */
- trans_pcie->inta = 0;
-
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+ spin_unlock(&trans_pcie->irq_lock);
/* Now service all interrupt bits discovered above. */
if (inta & CSR_INT_BIT_HW_ERR) {
@@ -1019,11 +1136,6 @@ out:
*
******************************************************************************/
-/* a device (PCI-E) page is 4096 bytes long */
-#define ICT_SHIFT 12
-#define ICT_SIZE (1 << ICT_SHIFT)
-#define ICT_COUNT (ICT_SIZE / sizeof(u32))
-
/* Free dram table */
void iwl_pcie_free_ict(struct iwl_trans *trans)
{
@@ -1048,7 +1160,7 @@ int iwl_pcie_alloc_ict(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
trans_pcie->ict_tbl =
- dma_alloc_coherent(trans->dev, ICT_SIZE,
+ dma_zalloc_coherent(trans->dev, ICT_SIZE,
&trans_pcie->ict_tbl_dma,
GFP_KERNEL);
if (!trans_pcie->ict_tbl)
@@ -1060,17 +1172,10 @@ int iwl_pcie_alloc_ict(struct iwl_trans *trans)
return -EINVAL;
}
- IWL_DEBUG_ISR(trans, "ict dma addr %Lx\n",
- (unsigned long long)trans_pcie->ict_tbl_dma);
-
- IWL_DEBUG_ISR(trans, "ict vir addr %p\n", trans_pcie->ict_tbl);
+ IWL_DEBUG_ISR(trans, "ict dma addr %Lx ict vir addr %p\n",
+ (unsigned long long)trans_pcie->ict_tbl_dma,
+ trans_pcie->ict_tbl);
- /* reset table and index to all 0 */
- memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
- trans_pcie->ict_index = 0;
-
- /* add periodic RX interrupt */
- trans_pcie->inta_mask |= CSR_INT_BIT_RX_PERIODIC;
return 0;
}
@@ -1081,12 +1186,11 @@ void iwl_pcie_reset_ict(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
u32 val;
- unsigned long flags;
if (!trans_pcie->ict_tbl)
return;
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ spin_lock(&trans_pcie->irq_lock);
iwl_disable_interrupts(trans);
memset(trans_pcie->ict_tbl, 0, ICT_SIZE);
@@ -1103,120 +1207,26 @@ void iwl_pcie_reset_ict(struct iwl_trans *trans)
trans_pcie->ict_index = 0;
iwl_write32(trans, CSR_INT, trans_pcie->inta_mask);
iwl_enable_interrupts(trans);
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+ spin_unlock(&trans_pcie->irq_lock);
}
/* Device is going down disable ict interrupt usage */
void iwl_pcie_disable_ict(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- unsigned long flags;
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ spin_lock(&trans_pcie->irq_lock);
trans_pcie->use_ict = false;
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
-}
-
-/* legacy (non-ICT) ISR. Assumes that trans_pcie->irq_lock is held */
-static irqreturn_t iwl_pcie_isr(int irq, void *data)
-{
- struct iwl_trans *trans = data;
- struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- u32 inta, inta_mask;
-
- lockdep_assert_held(&trans_pcie->irq_lock);
-
- trace_iwlwifi_dev_irq(trans->dev);
-
- /* Disable (but don't clear!) interrupts here to avoid
- * back-to-back ISRs and sporadic interrupts from our NIC.
- * If we have something to service, the irq thread will re-enable ints.
- * If we *don't* have something, we'll re-enable before leaving here. */
- inta_mask = iwl_read32(trans, CSR_INT_MASK);
- iwl_write32(trans, CSR_INT_MASK, 0x00000000);
-
- /* Discover which interrupts are active/pending */
- inta = iwl_read32(trans, CSR_INT);
-
- if (inta & (~inta_mask)) {
- IWL_DEBUG_ISR(trans,
- "We got a masked interrupt (0x%08x)...Ack and ignore\n",
- inta & (~inta_mask));
- iwl_write32(trans, CSR_INT, inta & (~inta_mask));
- inta &= inta_mask;
- }
-
- /* Ignore interrupt if there's nothing in NIC to service.
- * This may be due to IRQ shared with another device,
- * or due to sporadic interrupts thrown from our NIC. */
- if (!inta) {
- IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
- /*
- * Re-enable interrupts here since we don't have anything to
- * service, but only in case the handler won't run. Note that
- * the handler can be scheduled because of a previous
- * interrupt.
- */
- if (test_bit(STATUS_INT_ENABLED, &trans->status) &&
- !trans_pcie->inta)
- iwl_enable_interrupts(trans);
- return IRQ_NONE;
- }
-
- if ((inta == 0xFFFFFFFF) || ((inta & 0xFFFFFFF0) == 0xa5a5a5a0)) {
- /* Hardware disappeared. It might have already raised
- * an interrupt */
- IWL_WARN(trans, "HARDWARE GONE?? INTA == 0x%08x\n", inta);
- return IRQ_HANDLED;
- }
-
- if (iwl_have_debug_level(IWL_DL_ISR))
- IWL_DEBUG_ISR(trans,
- "ISR inta 0x%08x, enabled 0x%08x, fh 0x%08x\n",
- inta, inta_mask,
- iwl_read32(trans, CSR_FH_INT_STATUS));
-
- trans_pcie->inta |= inta;
- /* the thread will service interrupts and re-enable them */
- return IRQ_WAKE_THREAD;
+ spin_unlock(&trans_pcie->irq_lock);
}
-/* interrupt handler using ict table, with this interrupt driver will
- * stop using INTA register to get device's interrupt, reading this register
- * is expensive, device will write interrupts in ICT dram table, increment
- * index then will fire interrupt to driver, driver will OR all ICT table
- * entries from current index up to table entry with 0 value. the result is
- * the interrupt we need to service, driver will set the entries back to 0 and
- * set index.
- */
-irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
+irqreturn_t iwl_pcie_isr(int irq, void *data)
{
struct iwl_trans *trans = data;
- struct iwl_trans_pcie *trans_pcie;
- u32 inta;
- u32 val = 0;
- u32 read;
- unsigned long flags;
- irqreturn_t ret = IRQ_NONE;
if (!trans)
return IRQ_NONE;
- trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
-
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
-
- /* dram interrupt table not set yet,
- * use legacy interrupt.
- */
- if (unlikely(!trans_pcie->use_ict)) {
- ret = iwl_pcie_isr(irq, data);
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
- return ret;
- }
-
- trace_iwlwifi_dev_irq(trans->dev);
-
/* Disable (but don't clear!) interrupts here to avoid
* back-to-back ISRs and sporadic interrupts from our NIC.
* If we have something to service, the tasklet will re-enable ints.
@@ -1224,73 +1234,5 @@ irqreturn_t iwl_pcie_isr_ict(int irq, void *data)
*/
iwl_write32(trans, CSR_INT_MASK, 0x00000000);
- /* Ignore interrupt if there's nothing in NIC to service.
- * This may be due to IRQ shared with another device,
- * or due to sporadic interrupts thrown from our NIC. */
- read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
- trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index, read);
- if (!read) {
- IWL_DEBUG_ISR(trans, "Ignore interrupt, inta == 0\n");
- goto none;
- }
-
- /*
- * Collect all entries up to the first 0, starting from ict_index;
- * note we already read at ict_index.
- */
- do {
- val |= read;
- IWL_DEBUG_ISR(trans, "ICT index %d value 0x%08X\n",
- trans_pcie->ict_index, read);
- trans_pcie->ict_tbl[trans_pcie->ict_index] = 0;
- trans_pcie->ict_index =
- iwl_queue_inc_wrap(trans_pcie->ict_index, ICT_COUNT);
-
- read = le32_to_cpu(trans_pcie->ict_tbl[trans_pcie->ict_index]);
- trace_iwlwifi_dev_ict_read(trans->dev, trans_pcie->ict_index,
- read);
- } while (read);
-
- /* We should not get this value, just ignore it. */
- if (val == 0xffffffff)
- val = 0;
-
- /*
- * this is a w/a for a h/w bug. the h/w bug may cause the Rx bit
- * (bit 15 before shifting it to 31) to clear when using interrupt
- * coalescing. fortunately, bits 18 and 19 stay set when this happens
- * so we use them to decide on the real state of the Rx bit.
- * In order words, bit 15 is set if bit 18 or bit 19 are set.
- */
- if (val & 0xC0000)
- val |= 0x8000;
-
- inta = (0xff & val) | ((0xff00 & val) << 16);
- IWL_DEBUG_ISR(trans, "ISR inta 0x%08x, enabled(sw) 0x%08x ict 0x%08x\n",
- inta, trans_pcie->inta_mask, val);
- if (iwl_have_debug_level(IWL_DL_ISR))
- IWL_DEBUG_ISR(trans, "enabled(hw) 0x%08x\n",
- iwl_read32(trans, CSR_INT_MASK));
-
- inta &= trans_pcie->inta_mask;
- trans_pcie->inta |= inta;
-
- /* iwl_pcie_tasklet() will service interrupts and re-enable them */
- if (likely(inta)) {
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
- return IRQ_WAKE_THREAD;
- }
-
- ret = IRQ_HANDLED;
-
- none:
- /* re-enable interrupts here since we don't have anything to service.
- * only Re-enable if disabled by irq.
- */
- if (test_bit(STATUS_INT_ENABLED, &trans->status) &&
- !trans_pcie->inta)
- iwl_enable_interrupts(trans);
-
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
- return ret;
+ return IRQ_WAKE_THREAD;
}
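
The ICT path moved above (iwl_pcie_int_cause_ict) ORs table entries from the current index up to the first zero, applies a hardware-bug workaround for the Rx bit, and folds the result into the CSR_INT bit layout. A minimal user-space sketch of that accumulation, with made-up table contents (the real table lives in DMA-coherent memory and holds little-endian values):

#include <stdint.h>
#include <stdio.h>

#define ICT_COUNT 1024                           /* a 4096-byte page of u32 entries */

static uint32_t ict_cause(uint32_t *tbl, unsigned int *index)
{
	uint32_t val = 0, read;

	/* OR entries from *index up to the first zero, handing slots back as we go */
	while ((read = tbl[*index]) != 0) {
		val |= read;
		tbl[*index] = 0;
		*index = (*index + 1) % ICT_COUNT;   /* wrap like iwl_queue_inc_wrap() */
	}

	if (val == 0xffffffff)                       /* bogus all-ones read: ignore */
		val = 0;
	if (val & 0xC0000)                           /* w/a: bits 18/19 imply the Rx bit */
		val |= 0x8000;

	/* low byte stays in place, second byte moves up to bits 24-31 */
	return (0xff & val) | ((0xff00 & val) << 16);
}

int main(void)
{
	static uint32_t tbl[ICT_COUNT] = { 0x80, 0x8000 };
	unsigned int idx = 0;

	printf("inta = 0x%08x\n", (unsigned int)ict_cause(tbl, &idx));   /* 0x80000080 */
	return 0;
}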
diff --git a/drivers/net/wireless/iwlwifi/pcie/trans.c b/drivers/net/wireless/iwlwifi/pcie/trans.c
index eecd38e3f15f..16f66c1a23de 100644
--- a/drivers/net/wireless/iwlwifi/pcie/trans.c
+++ b/drivers/net/wireless/iwlwifi/pcie/trans.c
@@ -5,7 +5,7 @@
*
* GPL LICENSE SUMMARY
*
- * Copyright(c) 2007 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2007 - 2014 Intel Corporation. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of version 2 of the GNU General Public License as
@@ -30,7 +30,7 @@
*
* BSD LICENSE
*
- * Copyright(c) 2005 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2005 - 2014 Intel Corporation. All rights reserved.
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
@@ -75,33 +75,6 @@
#include "iwl-agn-hw.h"
#include "internal.h"
-static void __iwl_trans_pcie_set_bits_mask(struct iwl_trans *trans,
- u32 reg, u32 mask, u32 value)
-{
- u32 v;
-
-#ifdef CONFIG_IWLWIFI_DEBUG
- WARN_ON_ONCE(value & ~mask);
-#endif
-
- v = iwl_read32(trans, reg);
- v &= ~mask;
- v |= value;
- iwl_write32(trans, reg, v);
-}
-
-static inline void __iwl_trans_pcie_clear_bit(struct iwl_trans *trans,
- u32 reg, u32 mask)
-{
- __iwl_trans_pcie_set_bits_mask(trans, reg, mask, 0);
-}
-
-static inline void __iwl_trans_pcie_set_bit(struct iwl_trans *trans,
- u32 reg, u32 mask)
-{
- __iwl_trans_pcie_set_bits_mask(trans, reg, mask, mask);
-}
-
static void iwl_pcie_set_pwr(struct iwl_trans *trans, bool vaux)
{
if (vaux && pci_pme_capable(to_pci_dev(trans->dev), PCI_D3cold))
@@ -271,13 +244,12 @@ static void iwl_pcie_apm_stop(struct iwl_trans *trans)
static int iwl_pcie_nic_init(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- unsigned long flags;
/* nic_init */
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ spin_lock(&trans_pcie->irq_lock);
iwl_pcie_apm_init(trans);
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+ spin_unlock(&trans_pcie->irq_lock);
iwl_pcie_set_pwr(trans, false);
@@ -635,13 +607,14 @@ static void iwl_trans_pcie_fw_alive(struct iwl_trans *trans, u32 scd_addr)
static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- unsigned long flags;
- bool hw_rfkill;
+ bool hw_rfkill, was_hw_rfkill;
+
+ was_hw_rfkill = iwl_is_rfkill_set(trans);
/* tell the device to stop sending interrupts */
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ spin_lock(&trans_pcie->irq_lock);
iwl_disable_interrupts(trans);
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+ spin_unlock(&trans_pcie->irq_lock);
/* device going down, Stop using ICT table */
iwl_pcie_disable_ict(trans);
@@ -673,9 +646,9 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
/* Upon stop, the APM issues an interrupt if HW RF kill is set.
* Clean again the interrupt here
*/
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ spin_lock(&trans_pcie->irq_lock);
iwl_disable_interrupts(trans);
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+ spin_unlock(&trans_pcie->irq_lock);
/* stop and reset the on-board processor */
iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_NEVO_RESET);
@@ -698,13 +671,20 @@ static void iwl_trans_pcie_stop_device(struct iwl_trans *trans)
* all the interrupts were disabled, in this case we couldn't
* receive the RF kill interrupt and update the state in the
* op_mode.
+ * Don't call the op_mode if the rfkill state hasn't changed.
+ * This allows the op_mode to call stop_device from the rfkill
+ * notification without endless recursion. Under very rare
+ * circumstances, we might have a small recursion if the rfkill
+ * state changed exactly now while we were called from stop_device.
+ * This is very unlikely but can happen and is supported.
*/
hw_rfkill = iwl_is_rfkill_set(trans);
if (hw_rfkill)
set_bit(STATUS_RFKILL, &trans->status);
else
clear_bit(STATUS_RFKILL, &trans->status);
- iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
+ if (hw_rfkill != was_hw_rfkill)
+ iwl_op_mode_hw_rf_kill(trans->op_mode, hw_rfkill);
}
static void iwl_trans_pcie_d3_suspend(struct iwl_trans *trans, bool test)
@@ -799,7 +779,7 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
}
/* Reset the entire device */
- iwl_set_bit(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
+ iwl_write32(trans, CSR_RESET, CSR_RESET_REG_FLAG_SW_RESET);
usleep_range(10, 15);
@@ -821,18 +801,17 @@ static int iwl_trans_pcie_start_hw(struct iwl_trans *trans)
static void iwl_trans_pcie_op_mode_leave(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
- unsigned long flags;
/* disable interrupts - don't enable HW RF kill interrupt */
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ spin_lock(&trans_pcie->irq_lock);
iwl_disable_interrupts(trans);
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+ spin_unlock(&trans_pcie->irq_lock);
iwl_pcie_apm_stop(trans);
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ spin_lock(&trans_pcie->irq_lock);
iwl_disable_interrupts(trans);
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+ spin_unlock(&trans_pcie->irq_lock);
iwl_pcie_disable_ict(trans);
}
@@ -932,6 +911,9 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent,
spin_lock_irqsave(&trans_pcie->reg_lock, *flags);
+ if (trans_pcie->cmd_in_flight)
+ goto out;
+
/* this bit wakes up the NIC */
__iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
@@ -971,6 +953,7 @@ static bool iwl_trans_pcie_grab_nic_access(struct iwl_trans *trans, bool silent,
}
}
+out:
/*
* Fool sparse by faking we release the lock - sparse will
* track nic_access anyway.
@@ -992,6 +975,9 @@ static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
*/
__acquire(&trans_pcie->reg_lock);
+ if (trans_pcie->cmd_in_flight)
+ goto out;
+
__iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
/*
@@ -1001,6 +987,7 @@ static void iwl_trans_pcie_release_nic_access(struct iwl_trans *trans,
* scheduled on different CPUs (after we drop reg_lock).
*/
mmiowb();
+out:
spin_unlock_irqrestore(&trans_pcie->reg_lock, *flags);
}
@@ -1597,7 +1584,7 @@ struct iwl_trans *iwl_trans_pcie_alloc(struct pci_dev *pdev,
if (iwl_pcie_alloc_ict(trans))
goto out_free_cmd_pool;
- err = request_threaded_irq(pdev->irq, iwl_pcie_isr_ict,
+ err = request_threaded_irq(pdev->irq, iwl_pcie_isr,
iwl_pcie_irq_handler,
IRQF_SHARED, DRV_NAME, trans);
if (err) {
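
The stop_device() change above records the rfkill state on entry and only calls back into the op_mode when that state actually changed, so an rfkill notification cannot recurse endlessly back into stop_device(). A toy model of that check, with hw_rfkill_asserted() and notify_op_mode() as invented stand-ins:

#include <stdbool.h>
#include <stdio.h>

static bool rfkill_line;                        /* pretend kill-switch GPIO */

static bool hw_rfkill_asserted(void)  { return rfkill_line; }
static void notify_op_mode(bool on)   { printf("rfkill -> %d\n", on); }

static void stop_device(bool flip_during_stop)
{
	bool was = hw_rfkill_asserted();

	/* ... interrupts disabled, DMA stopped, APM shut down ... */
	if (flip_during_stop)
		rfkill_line = !rfkill_line;     /* the switch moved while we worked */

	bool now = hw_rfkill_asserted();
	if (now != was)                         /* only notify on a real change */
		notify_op_mode(now);
}

int main(void)
{
	rfkill_line = false;
	stop_device(false);     /* state unchanged: op_mode is left alone */
	stop_device(true);      /* switch flipped mid-stop: one notification */
	return 0;
}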
diff --git a/drivers/net/wireless/iwlwifi/pcie/tx.c b/drivers/net/wireless/iwlwifi/pcie/tx.c
index 8df24787c141..3b14fa8abfc7 100644
--- a/drivers/net/wireless/iwlwifi/pcie/tx.c
+++ b/drivers/net/wireless/iwlwifi/pcie/tx.c
@@ -1,6 +1,6 @@
/******************************************************************************
*
- * Copyright(c) 2003 - 2013 Intel Corporation. All rights reserved.
+ * Copyright(c) 2003 - 2014 Intel Corporation. All rights reserved.
*
* Portions of this file are derived from the ipw3945 project, as well
* as portions of the ieee80211 subsystem header files.
@@ -737,10 +737,9 @@ int iwl_pcie_tx_stop(struct iwl_trans *trans)
{
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int ch, txq_id, ret;
- unsigned long flags;
/* Turn off all Tx DMA fifos */
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ spin_lock(&trans_pcie->irq_lock);
iwl_pcie_txq_set_sched(trans, 0);
@@ -757,13 +756,19 @@ int iwl_pcie_tx_stop(struct iwl_trans *trans)
iwl_read_direct32(trans,
FH_TSSR_TX_STATUS_REG));
}
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+ spin_unlock(&trans_pcie->irq_lock);
- if (!trans_pcie->txq) {
- IWL_WARN(trans,
- "Stopping tx queues that aren't allocated...\n");
+ /*
+ * This function can be called before the op_mode disabled the
+ * queues. This happens when we have an rfkill interrupt.
+ * Since we stop Tx altogether - mark the queues as stopped.
+ */
+ memset(trans_pcie->queue_stopped, 0, sizeof(trans_pcie->queue_stopped));
+ memset(trans_pcie->queue_used, 0, sizeof(trans_pcie->queue_used));
+
+ /* This can happen: start_hw, stop_device */
+ if (!trans_pcie->txq)
return 0;
- }
/* Unmap DMA from host system and free skb's */
for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
@@ -865,7 +870,6 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int ret;
int txq_id, slots_num;
- unsigned long flags;
bool alloc = false;
if (!trans_pcie->txq) {
@@ -875,7 +879,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
alloc = true;
}
- spin_lock_irqsave(&trans_pcie->irq_lock, flags);
+ spin_lock(&trans_pcie->irq_lock);
/* Turn off all Tx DMA fifos */
iwl_write_prph(trans, SCD_TXFACT, 0);
@@ -884,7 +888,7 @@ int iwl_pcie_tx_init(struct iwl_trans *trans)
iwl_write_direct32(trans, FH_KW_MEM_ADDR_REG,
trans_pcie->kw.dma >> 4);
- spin_unlock_irqrestore(&trans_pcie->irq_lock, flags);
+ spin_unlock(&trans_pcie->irq_lock);
/* Alloc and init all Tx queues, including the command queue (#4/#9) */
for (txq_id = 0; txq_id < trans->cfg->base_params->num_of_queues;
@@ -1003,6 +1007,7 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
struct iwl_txq *txq = &trans_pcie->txq[txq_id];
struct iwl_queue *q = &txq->q;
+ unsigned long flags;
int nfreed = 0;
lockdep_assert_held(&txq->lock);
@@ -1025,6 +1030,16 @@ static void iwl_pcie_cmdq_reclaim(struct iwl_trans *trans, int txq_id, int idx)
}
}
+ if (q->read_ptr == q->write_ptr) {
+ spin_lock_irqsave(&trans_pcie->reg_lock, flags);
+ WARN_ON(!trans_pcie->cmd_in_flight);
+ trans_pcie->cmd_in_flight = false;
+ __iwl_trans_pcie_clear_bit(trans,
+ CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
+ }
+
iwl_pcie_txq_progress(trans_pcie, txq);
}
@@ -1141,8 +1156,15 @@ void iwl_trans_pcie_txq_disable(struct iwl_trans *trans, int txq_id)
SCD_TX_STTS_QUEUE_OFFSET(txq_id);
static const u32 zero_val[4] = {};
+ /*
+ * Upon HW Rfkill - we stop the device, and then stop the queues
+ * in the op_mode. Just for the sake of the simplicity of the op_mode,
+ * allow the op_mode to call txq_disable after it already called
+ * stop_device.
+ */
if (!test_and_clear_bit(txq_id, trans_pcie->queue_used)) {
- WARN_ONCE(1, "queue %d not used", txq_id);
+ WARN_ONCE(test_bit(STATUS_DEVICE_ENABLED, &trans->status),
+ "queue %d not used", txq_id);
return;
}
@@ -1176,12 +1198,13 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
struct iwl_queue *q = &txq->q;
struct iwl_device_cmd *out_cmd;
struct iwl_cmd_meta *out_meta;
+ unsigned long flags;
void *dup_buf = NULL;
dma_addr_t phys_addr;
int idx;
u16 copy_size, cmd_size, scratch_size;
bool had_nocopy = false;
- int i;
+ int i, ret;
u32 cmd_pos;
const u8 *cmddata[IWL_MAX_CMD_TBS_PER_TFD];
u16 cmdlen[IWL_MAX_CMD_TBS_PER_TFD];
@@ -1379,10 +1402,38 @@ static int iwl_pcie_enqueue_hcmd(struct iwl_trans *trans,
if (q->read_ptr == q->write_ptr && trans_pcie->wd_timeout)
mod_timer(&txq->stuck_timer, jiffies + trans_pcie->wd_timeout);
+ spin_lock_irqsave(&trans_pcie->reg_lock, flags);
+
+ /*
+ * wake up the NIC to make sure that the firmware will see the host
+ * command - we will let the NIC sleep once all the host commands
+ * returned.
+ */
+ if (!trans_pcie->cmd_in_flight) {
+ trans_pcie->cmd_in_flight = true;
+ __iwl_trans_pcie_set_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ ret = iwl_poll_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_VAL_MAC_ACCESS_EN,
+ (CSR_GP_CNTRL_REG_FLAG_MAC_CLOCK_READY |
+ CSR_GP_CNTRL_REG_FLAG_GOING_TO_SLEEP),
+ 15000);
+ if (ret < 0) {
+ __iwl_trans_pcie_clear_bit(trans, CSR_GP_CNTRL,
+ CSR_GP_CNTRL_REG_FLAG_MAC_ACCESS_REQ);
+ spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
+ trans_pcie->cmd_in_flight = false;
+ idx = -EIO;
+ goto out;
+ }
+ }
+
/* Increment and update queue's write index */
q->write_ptr = iwl_queue_inc_wrap(q->write_ptr, q->n_bd);
iwl_pcie_txq_inc_wr_ptr(trans, txq);
+ spin_unlock_irqrestore(&trans_pcie->reg_lock, flags);
+
out:
spin_unlock_bh(&txq->lock);
free_dup_buf:
@@ -1464,7 +1515,6 @@ void iwl_pcie_hcmd_complete(struct iwl_trans *trans,
}
#define HOST_COMPLETE_TIMEOUT (2 * HZ)
-#define COMMAND_POKE_TIMEOUT (HZ / 10)
static int iwl_pcie_send_hcmd_async(struct iwl_trans *trans,
struct iwl_host_cmd *cmd)
@@ -1492,7 +1542,6 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
struct iwl_trans_pcie *trans_pcie = IWL_TRANS_GET_PCIE_TRANS(trans);
int cmd_idx;
int ret;
- int timeout = HOST_COMPLETE_TIMEOUT;
IWL_DEBUG_INFO(trans, "Attempting to send sync command %s\n",
get_cmd_string(trans_pcie, cmd->id));
@@ -1516,29 +1565,10 @@ static int iwl_pcie_send_hcmd_sync(struct iwl_trans *trans,
return ret;
}
- while (timeout > 0) {
- unsigned long flags;
-
- timeout -= COMMAND_POKE_TIMEOUT;
- ret = wait_event_timeout(trans_pcie->wait_command_queue,
- !test_bit(STATUS_SYNC_HCMD_ACTIVE,
- &trans->status),
- COMMAND_POKE_TIMEOUT);
- if (ret)
- break;
- /* poke the device - it may have lost the command */
- if (iwl_trans_grab_nic_access(trans, true, &flags)) {
- iwl_trans_release_nic_access(trans, &flags);
- IWL_DEBUG_INFO(trans,
- "Tried to wake NIC for command %s\n",
- get_cmd_string(trans_pcie, cmd->id));
- } else {
- IWL_ERR(trans, "Failed to poke NIC for command %s\n",
- get_cmd_string(trans_pcie, cmd->id));
- break;
- }
- }
-
+ ret = wait_event_timeout(trans_pcie->wait_command_queue,
+ !test_bit(STATUS_SYNC_HCMD_ACTIVE,
+ &trans->status),
+ HOST_COMPLETE_TIMEOUT);
if (!ret) {
struct iwl_txq *txq = &trans_pcie->txq[trans_pcie->cmd_queue];
struct iwl_queue *q = &txq->q;
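
Finally, the cmd_in_flight handling added in tx.c keeps the NIC awake from the moment the first host command is queued until the command queue drains again, instead of poking the device on a timer as the removed COMMAND_POKE_TIMEOUT loop did. A condensed sketch of that bookkeeping; wake_nic() and allow_sleep() stand in for the CSR_GP_CNTRL MAC_ACCESS_REQ bit operations done under reg_lock in the real driver.

#include <stdbool.h>
#include <stdio.h>

static bool cmd_in_flight;
static int  pending_cmds;

static void wake_nic(void)    { puts("set MAC_ACCESS_REQ"); }
static void allow_sleep(void) { puts("clear MAC_ACCESS_REQ"); }

/* models iwl_pcie_enqueue_hcmd(): wake the NIC for the first outstanding command */
static void enqueue_hcmd(void)
{
	if (!cmd_in_flight) {
		cmd_in_flight = true;
		wake_nic();
	}
	pending_cmds++;
}

/* models iwl_pcie_cmdq_reclaim(): let the NIC sleep once the queue is empty */
static void hcmd_reclaimed(void)
{
	if (--pending_cmds == 0) {
		cmd_in_flight = false;
		allow_sleep();
	}
}

int main(void)
{
	enqueue_hcmd();
	enqueue_hcmd();                /* NIC already awake: no second wake */
	hcmd_reclaimed();
	hcmd_reclaimed();              /* last completion: sleep allowed again */
	return 0;
}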