Diffstat (limited to 'drivers/bus/mhi/ep/main.c')
 drivers/bus/mhi/ep/main.c | 127 +++++++++++++++++++++++++++++++++++++++++++--
 1 file changed, 125 insertions(+), 2 deletions(-)
diff --git a/drivers/bus/mhi/ep/main.c b/drivers/bus/mhi/ep/main.c
index c912daf6dc65..4e82006bd83b 100644
--- a/drivers/bus/mhi/ep/main.c
+++ b/drivers/bus/mhi/ep/main.c
@@ -11,6 +11,7 @@
#include <linux/dma-direction.h>
#include <linux/interrupt.h>
#include <linux/io.h>
+#include <linux/irq.h>
#include <linux/mhi_ep.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>
@@ -142,6 +143,115 @@ static void mhi_ep_state_worker(struct work_struct *work)
}
}
+static void mhi_ep_queue_channel_db(struct mhi_ep_cntrl *mhi_cntrl, unsigned long ch_int,
+ u32 ch_idx)
+{
+ struct mhi_ep_ring_item *item;
+ struct mhi_ep_ring *ring;
+ bool work = !!ch_int;
+ LIST_HEAD(head);
+ u32 i;
+
+ /* First add the ring items to a local list */
+ for_each_set_bit(i, &ch_int, 32) {
+ /* Channel index varies for each register: 0, 32, 64, 96 */
+ u32 ch_id = ch_idx + i;
+
+ ring = &mhi_cntrl->mhi_chan[ch_id].ring;
+ item = kzalloc(sizeof(*item), GFP_ATOMIC);
+ if (!item)
+ return;
+
+ item->ring = ring;
+ list_add_tail(&item->node, &head);
+ }
+
+ /* Now, splice the local list into ch_db_list and queue the work item */
+ if (work) {
+ spin_lock(&mhi_cntrl->list_lock);
+ list_splice_tail_init(&head, &mhi_cntrl->ch_db_list);
+ spin_unlock(&mhi_cntrl->list_lock);
+ }
+}
+
+/*
+ * Channel interrupt statuses are contained in four registers, each 32 bits wide.
+ * To check all interrupts, we need to loop through each register and then check
+ * for the bits that are set.
+ */
+static void mhi_ep_check_channel_interrupt(struct mhi_ep_cntrl *mhi_cntrl)
+{
+ u32 ch_int, ch_idx, i;
+
+ /* Bail out if there is no channel doorbell interrupt */
+ if (!mhi_ep_mmio_read_chdb_status_interrupts(mhi_cntrl))
+ return;
+
+ for (i = 0; i < MHI_MASK_ROWS_CH_DB; i++) {
+ ch_idx = i * MHI_MASK_CH_LEN;
+
+ /* Only process channel interrupt if the mask is enabled */
+ ch_int = mhi_cntrl->chdb[i].status & mhi_cntrl->chdb[i].mask;
+ if (ch_int) {
+ mhi_ep_queue_channel_db(mhi_cntrl, ch_int, ch_idx);
+ mhi_ep_mmio_write(mhi_cntrl, MHI_CHDB_INT_CLEAR_n(i),
+ mhi_cntrl->chdb[i].status);
+ }
+ }
+}
+
+static void mhi_ep_process_ctrl_interrupt(struct mhi_ep_cntrl *mhi_cntrl,
+ enum mhi_state state)
+{
+ struct mhi_ep_state_transition *item;
+
+ item = kzalloc(sizeof(*item), GFP_ATOMIC);
+ if (!item)
+ return;
+
+ item->state = state;
+ spin_lock(&mhi_cntrl->list_lock);
+ list_add_tail(&item->node, &mhi_cntrl->st_transition_list);
+ spin_unlock(&mhi_cntrl->list_lock);
+
+ queue_work(mhi_cntrl->wq, &mhi_cntrl->state_work);
+}
+
+/*
+ * Interrupt handler that services the interrupts raised by the host writing to
+ * the MHICTRL and Command Ring Doorbell (CRDB) registers: state change,
+ * command doorbell and channel doorbell interrupts.
+ */
+static irqreturn_t mhi_ep_irq(int irq, void *data)
+{
+ struct mhi_ep_cntrl *mhi_cntrl = data;
+ struct device *dev = &mhi_cntrl->mhi_dev->dev;
+ enum mhi_state state;
+ u32 int_value;
+ bool mhi_reset;
+
+ /* Acknowledge the ctrl interrupt */
+ int_value = mhi_ep_mmio_read(mhi_cntrl, MHI_CTRL_INT_STATUS);
+ mhi_ep_mmio_write(mhi_cntrl, MHI_CTRL_INT_CLEAR, int_value);
+
+ /* Check for ctrl interrupt */
+ if (FIELD_GET(MHI_CTRL_INT_STATUS_MSK, int_value)) {
+ dev_dbg(dev, "Processing ctrl interrupt\n");
+ mhi_ep_process_ctrl_interrupt(mhi_cntrl, state);
+ }
+
+ /* Check for command doorbell interrupt */
+ if (FIELD_GET(MHI_CTRL_INT_STATUS_CRDB_MSK, int_value))
+ dev_dbg(dev, "Processing command doorbell interrupt\n");
+
+ /* Check for channel interrupts */
+ mhi_ep_check_channel_interrupt(mhi_cntrl);
+
+ return IRQ_HANDLED;
+}
+
static void mhi_ep_release_device(struct device *dev)
{
struct mhi_ep_device *mhi_dev = to_mhi_ep_device(dev);
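Note that the interrupt handler above only queues work: state changes land on st_transition_list (drained by the existing mhi_ep_state_worker), while channel doorbells are spliced onto ch_db_list, which this patch does not yet consume. A minimal sketch of how a later ring-processing worker could drain ch_db_list is shown below; the worker name, the wq_work field and mhi_ep_process_ring() are illustrative placeholders and not part of this patch.

/*
 * Illustrative only: a worker of this shape (added later in the series) would
 * drain the doorbell items queued by mhi_ep_queue_channel_db(). The wq_work
 * field and the ring helper are placeholders, not defined by this patch.
 */
static void mhi_ep_ring_worker(struct work_struct *work)
{
	struct mhi_ep_cntrl *mhi_cntrl = container_of(work, struct mhi_ep_cntrl, wq_work);
	struct mhi_ep_ring_item *itr, *tmp;
	unsigned long flags;
	LIST_HEAD(head);

	/* Splice the queued doorbell items to a local list to keep lock hold time short */
	spin_lock_irqsave(&mhi_cntrl->list_lock, flags);
	list_splice_tail_init(&mhi_cntrl->ch_db_list, &head);
	spin_unlock_irqrestore(&mhi_cntrl->list_lock, flags);

	/* Process each ring the host rang a doorbell for, then free the item */
	list_for_each_entry_safe(itr, tmp, &head, node) {
		list_del(&itr->node);
		mhi_ep_process_ring(itr->ring);	/* placeholder for the actual ring handler */
		kfree(itr);
	}
}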
@@ -338,7 +445,7 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
struct mhi_ep_device *mhi_dev;
int ret;
- if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->mmio)
+ if (!mhi_cntrl || !mhi_cntrl->cntrl_dev || !mhi_cntrl->mmio || !mhi_cntrl->irq)
return -EINVAL;
ret = mhi_ep_chan_init(mhi_cntrl, config);
@@ -360,6 +467,7 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
}
INIT_LIST_HEAD(&mhi_cntrl->st_transition_list);
+ INIT_LIST_HEAD(&mhi_cntrl->ch_db_list);
spin_lock_init(&mhi_cntrl->state_lock);
spin_lock_init(&mhi_cntrl->list_lock);
mutex_init(&mhi_cntrl->event_lock);
@@ -375,12 +483,20 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
mhi_cntrl->index = ret;
+ irq_set_status_flags(mhi_cntrl->irq, IRQ_NOAUTOEN);
+ ret = request_irq(mhi_cntrl->irq, mhi_ep_irq, IRQF_TRIGGER_HIGH,
+ "doorbell_irq", mhi_cntrl);
+ if (ret) {
+ dev_err(mhi_cntrl->cntrl_dev, "Failed to request Doorbell IRQ\n");
+ goto err_ida_free;
+ }
+
/* Allocate the controller device */
mhi_dev = mhi_ep_alloc_device(mhi_cntrl, MHI_DEVICE_CONTROLLER);
if (IS_ERR(mhi_dev)) {
dev_err(mhi_cntrl->cntrl_dev, "Failed to allocate controller device\n");
ret = PTR_ERR(mhi_dev);
- goto err_ida_free;
+ goto err_free_irq;
}
dev_set_name(&mhi_dev->dev, "mhi_ep%u", mhi_cntrl->index);
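Because the IRQ is requested with IRQ_NOAUTOEN, request_irq() does not enable it; the controller is expected to enable it only once the endpoint stack is ready to service host doorbells and to disable it again when shutting down. The sketch below illustrates that split under the assumption that the enable/disable happens in the power up/down paths (functions of this shape exist in the eventual stack, but their bodies here are placeholders).

/*
 * Illustrative only: with IRQ_NOAUTOEN set, the doorbell IRQ stays disabled
 * until the MHI endpoint stack is ready. Where exactly enable_irq() is called
 * is an assumption for this sketch.
 */
int mhi_ep_power_up(struct mhi_ep_cntrl *mhi_cntrl)
{
	/* ... MMIO init, ring and event setup, etc. ... */
	enable_irq(mhi_cntrl->irq);	/* start taking ctrl/doorbell interrupts */
	return 0;
}

void mhi_ep_power_down(struct mhi_ep_cntrl *mhi_cntrl)
{
	disable_irq(mhi_cntrl->irq);	/* quiesce before tearing down state */
	/* ... cleanup ... */
}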
@@ -397,6 +513,8 @@ int mhi_ep_register_controller(struct mhi_ep_cntrl *mhi_cntrl,
err_put_dev:
put_device(&mhi_dev->dev);
+err_free_irq:
+ free_irq(mhi_cntrl->irq, mhi_cntrl);
err_ida_free:
ida_free(&mhi_ep_cntrl_ida, mhi_cntrl->index);
err_destroy_wq:
@@ -416,6 +534,8 @@ void mhi_ep_unregister_controller(struct mhi_ep_cntrl *mhi_cntrl)
destroy_workqueue(mhi_cntrl->wq);
+ free_irq(mhi_cntrl->irq, mhi_cntrl);
+
kfree(mhi_cntrl->mhi_cmd);
kfree(mhi_cntrl->mhi_chan);
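For context, a controller driver is now expected to fill in both mmio and irq before registering, since mhi_ep_register_controller() rejects a missing irq and requests it itself. The hypothetical probe below shows only that wiring; the platform-driver framing, example_mhi_ep_probe() and example_mhi_ep_config are illustrative and not taken from this patch (the real in-tree user would typically be an endpoint function driver).

#include <linux/mhi_ep.h>
#include <linux/platform_device.h>

/* Channel and event ring layout omitted; contents are driver specific */
static const struct mhi_ep_cntrl_config example_mhi_ep_config = { };

static int example_mhi_ep_probe(struct platform_device *pdev)
{
	struct mhi_ep_cntrl *mhi_cntrl;
	int irq;

	mhi_cntrl = devm_kzalloc(&pdev->dev, sizeof(*mhi_cntrl), GFP_KERNEL);
	if (!mhi_cntrl)
		return -ENOMEM;

	mhi_cntrl->cntrl_dev = &pdev->dev;

	/* MMIO region that the MHI EP stack reads/writes doorbell state from */
	mhi_cntrl->mmio = devm_platform_ioremap_resource(pdev, 0);
	if (IS_ERR(mhi_cntrl->mmio))
		return PTR_ERR(mhi_cntrl->mmio);

	/* Doorbell IRQ consumed by request_irq() in mhi_ep_register_controller() */
	irq = platform_get_irq(pdev, 0);
	if (irq < 0)
		return irq;
	mhi_cntrl->irq = irq;

	return mhi_ep_register_controller(mhi_cntrl, &example_mhi_ep_config);
}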