-rw-r--r--  drivers/net/wwan/t7xx/t7xx_hif_cldma.c      12
-rw-r--r--  drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c  14
-rw-r--r--  drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c  41
-rw-r--r--  drivers/net/wwan/t7xx/t7xx_mhccif.c          3
-rw-r--r--  drivers/net/wwan/t7xx/t7xx_pci.c            93
-rw-r--r--  drivers/net/wwan/t7xx/t7xx_pci.h            10
6 files changed, 158 insertions(+), 15 deletions(-)
diff --git a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
index 90306eb9858b..46066dcd2607 100644
--- a/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
+++ b/drivers/net/wwan/t7xx/t7xx_hif_cldma.c
@@ -934,6 +934,7 @@ int t7xx_cldma_send_skb(struct cldma_ctrl *md_ctrl, int qno, struct sk_buff *skb
if (ret < 0 && ret != -EACCES)
return ret;
+ t7xx_pci_disable_sleep(md_ctrl->t7xx_dev);
queue = &md_ctrl->txq[qno];
spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
@@ -955,6 +956,11 @@ int t7xx_cldma_send_skb(struct cldma_ctrl *md_ctrl, int qno, struct sk_buff *skb
queue->tx_next = list_next_entry_circular(tx_req, gpd_ring, entry);
spin_unlock_irqrestore(&queue->ring_lock, flags);
+ if (!t7xx_pci_sleep_disable_complete(md_ctrl->t7xx_dev)) {
+ ret = -ETIMEDOUT;
+ break;
+ }
+
/* Protect the access to the modem for queues operations (resume/start)
* which access shared locations by all the queues.
* cldma_lock is independent of ring_lock which is per queue.
@@ -967,6 +973,11 @@ int t7xx_cldma_send_skb(struct cldma_ctrl *md_ctrl, int qno, struct sk_buff *skb
}
spin_unlock_irqrestore(&queue->ring_lock, flags);
+ if (!t7xx_pci_sleep_disable_complete(md_ctrl->t7xx_dev)) {
+ ret = -ETIMEDOUT;
+ break;
+ }
+
if (!t7xx_cldma_hw_queue_status(&md_ctrl->hw_info, qno, MTK_TX)) {
spin_lock_irqsave(&md_ctrl->cldma_lock, flags);
t7xx_cldma_hw_resume_queue(&md_ctrl->hw_info, qno, MTK_TX);
@@ -977,6 +988,7 @@ int t7xx_cldma_send_skb(struct cldma_ctrl *md_ctrl, int qno, struct sk_buff *skb
} while (!ret);
allow_sleep:
+ t7xx_pci_enable_sleep(md_ctrl->t7xx_dev);
pm_runtime_mark_last_busy(md_ctrl->dev);
pm_runtime_put_autosuspend(md_ctrl->dev);
return ret;
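Taken together, the hunks above give t7xx_cldma_send_skb() a disable/wait/access/enable shape around the hardware work. A minimal sketch of that pattern, where the hypothetical touch_cldma_hw() stands in for the real ring and queue operations:

/* Minimal sketch of the lock pattern added above. touch_cldma_hw() is a
 * hypothetical stand-in for the real CLDMA ring/queue register accesses.
 */
static int sketch_cldma_send(struct t7xx_pci_dev *t7xx_dev)
{
	int ret = 0;

	t7xx_pci_disable_sleep(t7xx_dev);	/* request the deep sleep lock */

	if (!t7xx_pci_sleep_disable_complete(t7xx_dev)) {
		ret = -ETIMEDOUT;		/* device never acked the lock */
		goto allow_sleep;
	}

	touch_cldma_hw(t7xx_dev);		/* device held awake: HW access is safe */

allow_sleep:
	t7xx_pci_enable_sleep(t7xx_dev);	/* always balance the disable */
	return ret;
}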
diff --git a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
index 5f25555eb4a4..35a8a0d7c1ee 100644
--- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
+++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_rx.c
@@ -927,8 +927,11 @@ static void t7xx_dpmaif_rxq_work(struct work_struct *work)
if (ret < 0 && ret != -EACCES)
return;
- t7xx_dpmaif_do_rx(dpmaif_ctrl, rxq);
+ t7xx_pci_disable_sleep(dpmaif_ctrl->t7xx_dev);
+ if (t7xx_pci_sleep_disable_complete(dpmaif_ctrl->t7xx_dev))
+ t7xx_dpmaif_do_rx(dpmaif_ctrl, rxq);
+ t7xx_pci_enable_sleep(dpmaif_ctrl->t7xx_dev);
pm_runtime_mark_last_busy(dpmaif_ctrl->dev);
pm_runtime_put_autosuspend(dpmaif_ctrl->dev);
atomic_set(&rxq->rx_processing, 0);
@@ -1138,11 +1141,16 @@ static void t7xx_dpmaif_bat_release_work(struct work_struct *work)
if (ret < 0 && ret != -EACCES)
return;
+ t7xx_pci_disable_sleep(dpmaif_ctrl->t7xx_dev);
+
/* ALL RXQ use one BAT table, so choose DPF_RX_QNO_DFT */
rxq = &dpmaif_ctrl->rxq[DPF_RX_QNO_DFT];
- t7xx_dpmaif_bat_release_and_add(rxq);
- t7xx_dpmaif_frag_bat_release_and_add(rxq);
+ if (t7xx_pci_sleep_disable_complete(dpmaif_ctrl->t7xx_dev)) {
+ t7xx_dpmaif_bat_release_and_add(rxq);
+ t7xx_dpmaif_frag_bat_release_and_add(rxq);
+ }
+ t7xx_pci_enable_sleep(dpmaif_ctrl->t7xx_dev);
pm_runtime_mark_last_busy(dpmaif_ctrl->dev);
pm_runtime_put_autosuspend(dpmaif_ctrl->dev);
}
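A design note on the two RX work items above: a lock timeout is not treated as an error. When t7xx_pci_sleep_disable_complete() fails, the pass is simply skipped and both the sleep lock and the runtime-PM reference are released; the work is presumably re-queued by a later RX interrupt, so a missed pass costs latency, not data.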
diff --git a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c
index a5ac9dfaecfd..46514208d4f9 100644
--- a/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c
+++ b/drivers/net/wwan/t7xx/t7xx_hif_dpmaif_tx.c
@@ -166,20 +166,25 @@ static void t7xx_dpmaif_tx_done(struct work_struct *work)
if (ret < 0 && ret != -EACCES)
return;
- hw_info = &dpmaif_ctrl->hw_info;
- ret = t7xx_dpmaif_tx_release(dpmaif_ctrl, txq->index, txq->drb_size_cnt);
- if (ret == -EAGAIN ||
- (t7xx_dpmaif_ul_clr_done(hw_info, txq->index) &&
- t7xx_dpmaif_drb_ring_not_empty(txq))) {
- queue_work(dpmaif_ctrl->txq[txq->index].worker,
- &dpmaif_ctrl->txq[txq->index].dpmaif_tx_work);
- /* Give the device time to enter the low power state */
- t7xx_dpmaif_clr_ip_busy_sts(hw_info);
- } else {
- t7xx_dpmaif_clr_ip_busy_sts(hw_info);
- t7xx_dpmaif_unmask_ulq_intr(hw_info, txq->index);
+ /* The device may be in low power state. Disable sleep if needed */
+ t7xx_pci_disable_sleep(dpmaif_ctrl->t7xx_dev);
+ if (t7xx_pci_sleep_disable_complete(dpmaif_ctrl->t7xx_dev)) {
+ hw_info = &dpmaif_ctrl->hw_info;
+ ret = t7xx_dpmaif_tx_release(dpmaif_ctrl, txq->index, txq->drb_size_cnt);
+ if (ret == -EAGAIN ||
+ (t7xx_dpmaif_ul_clr_done(hw_info, txq->index) &&
+ t7xx_dpmaif_drb_ring_not_empty(txq))) {
+ queue_work(dpmaif_ctrl->txq[txq->index].worker,
+ &dpmaif_ctrl->txq[txq->index].dpmaif_tx_work);
+ /* Give the device time to enter the low power state */
+ t7xx_dpmaif_clr_ip_busy_sts(hw_info);
+ } else {
+ t7xx_dpmaif_clr_ip_busy_sts(hw_info);
+ t7xx_dpmaif_unmask_ulq_intr(hw_info, txq->index);
+ }
}
+ t7xx_pci_enable_sleep(dpmaif_ctrl->t7xx_dev);
pm_runtime_mark_last_busy(dpmaif_ctrl->dev);
pm_runtime_put_autosuspend(dpmaif_ctrl->dev);
}
@@ -405,6 +410,8 @@ static int t7xx_txq_burst_send_skb(struct dpmaif_tx_queue *txq)
static void t7xx_do_tx_hw_push(struct dpmaif_ctrl *dpmaif_ctrl)
{
+ bool wait_disable_sleep = true;
+
do {
struct dpmaif_tx_queue *txq;
int drb_send_cnt;
@@ -420,6 +427,14 @@ static void t7xx_do_tx_hw_push(struct dpmaif_ctrl *dpmaif_ctrl)
continue;
}
+ /* Wait for the PCIe resource to unlock */
+ if (wait_disable_sleep) {
+ if (!t7xx_pci_sleep_disable_complete(dpmaif_ctrl->t7xx_dev))
+ return;
+
+ wait_disable_sleep = false;
+ }
+
t7xx_dpmaif_ul_update_hw_drb_cnt(&dpmaif_ctrl->hw_info, txq->index,
drb_send_cnt * DPMAIF_UL_DRB_SIZE_WORD);
@@ -450,7 +465,9 @@ static int t7xx_dpmaif_tx_hw_push_thread(void *arg)
if (ret < 0 && ret != -EACCES)
return ret;
+ t7xx_pci_disable_sleep(dpmaif_ctrl->t7xx_dev);
t7xx_do_tx_hw_push(dpmaif_ctrl);
+ t7xx_pci_enable_sleep(dpmaif_ctrl->t7xx_dev);
pm_runtime_mark_last_busy(dpmaif_ctrl->dev);
pm_runtime_put_autosuspend(dpmaif_ctrl->dev);
}
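The push thread takes the sleep lock once around t7xx_do_tx_hw_push(), and the loop inside blocks on the ACK only before its first doorbell write; the wait_disable_sleep flag makes that wait one-shot. A condensed sketch of the control flow, with hypothetical have_work() and ring_doorbell() helpers:

/* One-shot wait sketch: block on the lock ACK once, then push freely.
 * have_work() and ring_doorbell() are hypothetical stand-ins for the
 * real DRB accounting and doorbell update.
 */
static void sketch_tx_push(struct t7xx_pci_dev *t7xx_dev)
{
	bool wait_disable_sleep = true;

	while (have_work()) {
		if (wait_disable_sleep) {
			if (!t7xx_pci_sleep_disable_complete(t7xx_dev))
				return;	/* bail out; the caller re-enables sleep */
			wait_disable_sleep = false;
		}
		ring_doorbell();	/* safe: the device is held awake */
	}
}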
diff --git a/drivers/net/wwan/t7xx/t7xx_mhccif.c b/drivers/net/wwan/t7xx/t7xx_mhccif.c
index 4bb452f5ccff..3ee18d46f8d2 100644
--- a/drivers/net/wwan/t7xx/t7xx_mhccif.c
+++ b/drivers/net/wwan/t7xx/t7xx_mhccif.c
@@ -59,6 +59,9 @@ static irqreturn_t t7xx_mhccif_isr_thread(int irq, void *data)
t7xx_mhccif_clear_interrupts(t7xx_dev, int_status);
+ if (int_status & D2H_INT_DS_LOCK_ACK)
+ complete_all(&t7xx_dev->sleep_lock_acquire);
+
if (int_status & D2H_INT_SR_ACK)
complete(&t7xx_dev->pm_sr_ack);
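complete_all() is used for the DS_LOCK ACK, rather than complete(), because several paths (the CLDMA send path and the DPMAIF RX/TX work items above) may be blocked in t7xx_pci_sleep_disable_complete() at the same time, and a single D2H_INT_DS_LOCK_ACK must release all of them. The completion is re-armed via reinit_completion() in t7xx_pci_disable_sleep() the next time sleep_disable_count rises from zero.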
diff --git a/drivers/net/wwan/t7xx/t7xx_pci.c b/drivers/net/wwan/t7xx/t7xx_pci.c
index 400c11f7b31e..5f1bb8d6afb6 100644
--- a/drivers/net/wwan/t7xx/t7xx_pci.c
+++ b/drivers/net/wwan/t7xx/t7xx_pci.c
@@ -33,6 +33,7 @@
#include <linux/pm.h>
#include <linux/pm_runtime.h>
#include <linux/pm_wakeup.h>
+#include <linux/spinlock.h>
#include "t7xx_mhccif.h"
#include "t7xx_modem_ops.h"
@@ -44,6 +45,7 @@
#define T7XX_PCI_IREG_BASE 0
#define T7XX_PCI_EREG_BASE 2
+#define PM_SLEEP_DIS_TIMEOUT_MS 20
#define PM_ACK_TIMEOUT_MS 1500
#define PM_AUTOSUSPEND_MS 20000
#define PM_RESOURCE_POLL_TIMEOUT_US 10000
@@ -56,6 +58,21 @@ enum t7xx_pm_state {
MTK_PM_RESUMED,
};
+static void t7xx_dev_set_sleep_capability(struct t7xx_pci_dev *t7xx_dev, bool enable)
+{
+ void __iomem *ctrl_reg = IREG_BASE(t7xx_dev) + T7XX_PCIE_MISC_CTRL;
+ u32 value;
+
+ value = ioread32(ctrl_reg);
+
+ if (enable)
+ value &= ~T7XX_PCIE_MISC_MAC_SLEEP_DIS;
+ else
+ value |= T7XX_PCIE_MISC_MAC_SLEEP_DIS;
+
+ iowrite32(value, ctrl_reg);
+}
+
static int t7xx_wait_pm_config(struct t7xx_pci_dev *t7xx_dev)
{
int ret, val;
@@ -76,6 +93,8 @@ static int t7xx_pci_pm_init(struct t7xx_pci_dev *t7xx_dev)
INIT_LIST_HEAD(&t7xx_dev->md_pm_entities);
mutex_init(&t7xx_dev->md_pm_entity_mtx);
+ spin_lock_init(&t7xx_dev->md_pm_lock);
+ init_completion(&t7xx_dev->sleep_lock_acquire);
init_completion(&t7xx_dev->pm_sr_ack);
atomic_set(&t7xx_dev->md_pm_state, MTK_PM_INIT);
@@ -94,6 +113,7 @@ void t7xx_pci_pm_init_late(struct t7xx_pci_dev *t7xx_dev)
{
/* Enable the PCIe resource lock only after MD deep sleep is done */
t7xx_mhccif_mask_clr(t7xx_dev,
+ D2H_INT_DS_LOCK_ACK |
D2H_INT_SUSPEND_ACK |
D2H_INT_RESUME_ACK |
D2H_INT_SUSPEND_ACK_AP |
@@ -159,6 +179,79 @@ int t7xx_pci_pm_entity_unregister(struct t7xx_pci_dev *t7xx_dev, struct md_pm_en
return -ENXIO;
}
+int t7xx_pci_sleep_disable_complete(struct t7xx_pci_dev *t7xx_dev)
+{
+ struct device *dev = &t7xx_dev->pdev->dev;
+ int ret;
+
+ ret = wait_for_completion_timeout(&t7xx_dev->sleep_lock_acquire,
+ msecs_to_jiffies(PM_SLEEP_DIS_TIMEOUT_MS));
+ if (!ret)
+ dev_err_ratelimited(dev, "Resource wait complete timed out\n");
+
+ return ret;
+}
+
+/**
+ * t7xx_pci_disable_sleep() - Disable deep sleep capability.
+ * @t7xx_dev: MTK device.
+ *
+ * Lock the deep sleep capability. Note that, from the host's point of view, the
+ * device can still go into deep sleep while it is in the D0 power state.
+ *
+ * If the device is already in deep sleep, wake it up and disable the deep sleep
+ * capability.
+ */
+void t7xx_pci_disable_sleep(struct t7xx_pci_dev *t7xx_dev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&t7xx_dev->md_pm_lock, flags);
+ t7xx_dev->sleep_disable_count++;
+ if (atomic_read(&t7xx_dev->md_pm_state) < MTK_PM_RESUMED)
+ goto unlock_and_complete;
+
+ if (t7xx_dev->sleep_disable_count == 1) {
+ u32 status;
+
+ reinit_completion(&t7xx_dev->sleep_lock_acquire);
+ t7xx_dev_set_sleep_capability(t7xx_dev, false);
+
+ status = ioread32(IREG_BASE(t7xx_dev) + T7XX_PCIE_RESOURCE_STATUS);
+ if (status & T7XX_PCIE_RESOURCE_STS_MSK)
+ goto unlock_and_complete;
+
+ t7xx_mhccif_h2d_swint_trigger(t7xx_dev, H2D_CH_DS_LOCK);
+ }
+ spin_unlock_irqrestore(&t7xx_dev->md_pm_lock, flags);
+ return;
+
+unlock_and_complete:
+ spin_unlock_irqrestore(&t7xx_dev->md_pm_lock, flags);
+ complete_all(&t7xx_dev->sleep_lock_acquire);
+}
+
+/**
+ * t7xx_pci_enable_sleep() - Enable deep sleep capability.
+ * @t7xx_dev: MTK device.
+ *
+ * After enabling deep sleep, the device can enter the deep sleep state.
+ */
+void t7xx_pci_enable_sleep(struct t7xx_pci_dev *t7xx_dev)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&t7xx_dev->md_pm_lock, flags);
+ t7xx_dev->sleep_disable_count--;
+ if (atomic_read(&t7xx_dev->md_pm_state) < MTK_PM_RESUMED)
+ goto unlock;
+
+ if (t7xx_dev->sleep_disable_count == 0)
+ t7xx_dev_set_sleep_capability(t7xx_dev, true);
+
+unlock:
+ spin_unlock_irqrestore(&t7xx_dev->md_pm_lock, flags);
+}
+
static int t7xx_send_pm_request(struct t7xx_pci_dev *t7xx_dev, u32 request)
{
unsigned long wait_ret;
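t7xx_pci_disable_sleep() above has three outcomes. Before the driver reaches MTK_PM_RESUMED, the completion is signalled immediately, as there is no handshake to perform yet. On the 0 -> 1 count transition, if T7XX_PCIE_RESOURCE_STS_MSK is already set the device is awake and the completion is again signalled without a round trip; otherwise H2D_CH_DS_LOCK is triggered and waiters block until the D2H_INT_DS_LOCK_ACK interrupt completes sleep_lock_acquire.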
diff --git a/drivers/net/wwan/t7xx/t7xx_pci.h b/drivers/net/wwan/t7xx/t7xx_pci.h
index f51fc5a1301f..50b37056ce5a 100644
--- a/drivers/net/wwan/t7xx/t7xx_pci.h
+++ b/drivers/net/wwan/t7xx/t7xx_pci.h
@@ -21,6 +21,7 @@
#include <linux/irqreturn.h>
#include <linux/mutex.h>
#include <linux/pci.h>
+#include <linux/spinlock.h>
#include <linux/types.h>
#include "t7xx_reg.h"
@@ -55,6 +56,9 @@ typedef irqreturn_t (*t7xx_intr_callback)(int irq, void *param);
* @md_pm_entity_mtx: protects md_pm_entities list
* @pm_sr_ack: ack from the device when it went to sleep or woke up
* @md_pm_state: state for resume/suspend
+ * @md_pm_lock: protects PCIe sleep lock
+ * @sleep_disable_count: PCIe L1.2 lock counter
+ * @sleep_lock_acquire: indicates that sleep has been disabled
*/
struct t7xx_pci_dev {
t7xx_intr_callback intr_handler[EXT_INT_NUM];
@@ -71,6 +75,9 @@ struct t7xx_pci_dev {
struct mutex md_pm_entity_mtx; /* Protects MD PM entities list */
struct completion pm_sr_ack;
atomic_t md_pm_state;
+ spinlock_t md_pm_lock; /* Protects PCI resource lock */
+ unsigned int sleep_disable_count;
+ struct completion sleep_lock_acquire;
};
enum t7xx_pm_id {
@@ -102,6 +109,9 @@ struct md_pm_entity {
void *entity_param;
};
+void t7xx_pci_disable_sleep(struct t7xx_pci_dev *t7xx_dev);
+void t7xx_pci_enable_sleep(struct t7xx_pci_dev *t7xx_dev);
+int t7xx_pci_sleep_disable_complete(struct t7xx_pci_dev *t7xx_dev);
int t7xx_pci_pm_entity_register(struct t7xx_pci_dev *t7xx_dev, struct md_pm_entity *pm_entity);
int t7xx_pci_pm_entity_unregister(struct t7xx_pci_dev *t7xx_dev, struct md_pm_entity *pm_entity);
void t7xx_pci_pm_init_late(struct t7xx_pci_dev *t7xx_dev);
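Finally, a sketch of the counting semantics that md_pm_lock protects: the lock nests, and the MAC sleep capability is toggled only on the 0 -> 1 and 1 -> 0 transitions of sleep_disable_count, so concurrent users compose without extra coordination.

/* Nesting sketch: the lock is reference-counted, so only the outermost
 * disable/enable pair touches the hardware.
 */
static void sketch_nested_users(struct t7xx_pci_dev *t7xx_dev)
{
	t7xx_pci_disable_sleep(t7xx_dev);	/* 0 -> 1: sleep disabled, DS_LOCK requested */
	t7xx_pci_disable_sleep(t7xx_dev);	/* 1 -> 2: counter bump only */
	t7xx_pci_enable_sleep(t7xx_dev);	/* 2 -> 1: counter drop only, still locked */
	t7xx_pci_enable_sleep(t7xx_dev);	/* 1 -> 0: sleep capability re-enabled */
}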