author     Sebastian Andrzej Siewior <bigeasy@linutronix.de>   2020-11-26 14:29:52 +0100
committer  Martin K. Petersen <martin.petersen@oracle.com>     2020-12-01 00:03:54 -0500
commit     817a7c996786f803a8b5528ca11a842eed88e01f (patch)
tree       ba2630d060adf0c3156a6d5901546b561e5834bc /drivers/message/fusion/mptsas.c
parent     b8a5144370bc59dbb192b8f29298920ceadc3d1e (diff)
scsi: message: fusion: Remove in_interrupt() usage in mptsas_cleanup_fw_event_q()
mptsas_cleanup_fw_event_q() uses in_interrupt() to determine if it is safe to
cancel a worker item.

Aside from that, in_interrupt() is deprecated as it does not provide what the
name suggests: it covers more than hard/soft interrupt servicing context and
is semantically ill defined.

Looking closer, there are a few problems with the current construct:

- It could be invoked from an interrupt handler / non-blocking context,
  because cancel_delayed_work() has no such restriction. Also,
  mptsas_free_fw_event() has no such restriction.

- The list is accessed unlocked. It may dequeue a valid work item, but by
  the time cancel_delayed_work() is invoked the memory may already have been
  released or reused because the worker has run.

mptsas_cleanup_fw_event_q() is invoked via mptsas_shutdown(), which is always
invoked from preemptible context on device shutdown. It is also invoked via
mptsas_ioc_reset(, MPT_IOC_POST_RESET), which is a MptResetHandlers callback.
The only callers here are mpt_SoftResetHandler(), mpt_HardResetHandler() and
mpt_Soft_Hard_ResetHandler(). All of these functions have a `sleepFlag'
argument, each caller passes `CAN_SLEEP' here, and according to the current
documentation:

| @sleepFlag: Indicates if sleep or schedule must be called

so it is safe to sleep.

Add a fw_event_work::users member. Initialize it to one by default so
mptsas_free_fw_event() will free the memory. mptsas_cleanup_fw_event_q()
increments it for each item it dequeues and may then keep a pointer after
dropping the lock. Invoke cancel_delayed_work_sync() to cancel the work item
and wait if the worker is currently busy. Free the memory afterwards, since
the cleanup path then owns the last reference to it.

Link: https://lore.kernel.org/r/20201126132952.2287996-15-bigeasy@linutronix.de
Cc: Sathya Prakash <sathya.prakash@broadcom.com>
Cc: Sreekanth Reddy <sreekanth.reddy@broadcom.com>
Cc: Suganath Prabu Subramani <suganath-prabu.subramani@broadcom.com>
Cc: MPT-FusionLinux.pdl@broadcom.com
Reviewed-by: Daniel Wagner <dwagner@suse.de>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
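Note: the `users' reference count itself is added to struct fw_event_work in
mptsas.h, and that hunk is not shown on this page because the diffstat below
is limited to mptsas.c. As a rough sketch, listing only the members the hunks
below actually touch (the field is shown as a plain int here; the exact
declaration and the remaining members live in mptsas.h):

struct fw_event_work {
        struct list_head        list;   /* linked into ioc->fw_event_list */
        struct delayed_work     work;   /* runs mptsas_firmware_event_work() */
        int                     users;  /* new: 1 at enqueue, +1 while the
                                         * cleanup path holds a pointer with
                                         * fw_event_lock dropped */
        /* ... remaining members omitted ... */
};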
Diffstat (limited to 'drivers/message/fusion/mptsas.c')
-rw-r--r--  drivers/message/fusion/mptsas.c  45
1 file changed, 35 insertions, 10 deletions
diff --git a/drivers/message/fusion/mptsas.c b/drivers/message/fusion/mptsas.c
index 18b91ea1a353..5eb0b3361e4e 100644
--- a/drivers/message/fusion/mptsas.c
+++ b/drivers/message/fusion/mptsas.c
@@ -289,6 +289,7 @@ mptsas_add_fw_event(MPT_ADAPTER *ioc, struct fw_event_work *fw_event,
         spin_lock_irqsave(&ioc->fw_event_lock, flags);
         list_add_tail(&fw_event->list, &ioc->fw_event_list);
+        fw_event->users = 1;
         INIT_DELAYED_WORK(&fw_event->work, mptsas_firmware_event_work);
         devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: add (fw_event=0x%p)"
                 "on cpuid %d\n", ioc->name, __func__,
@@ -314,6 +315,15 @@ mptsas_requeue_fw_event(MPT_ADAPTER *ioc, struct fw_event_work *fw_event,
         spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
 }

+static void __mptsas_free_fw_event(MPT_ADAPTER *ioc,
+                                   struct fw_event_work *fw_event)
+{
+        devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: kfree (fw_event=0x%p)\n",
+            ioc->name, __func__, fw_event));
+        list_del(&fw_event->list);
+        kfree(fw_event);
+}
+
 /* free memory associated to a sas firmware event */
 static void
 mptsas_free_fw_event(MPT_ADAPTER *ioc, struct fw_event_work *fw_event)
@@ -321,10 +331,9 @@ mptsas_free_fw_event(MPT_ADAPTER *ioc, struct fw_event_work *fw_event)
         unsigned long flags;

         spin_lock_irqsave(&ioc->fw_event_lock, flags);
-        devtprintk(ioc, printk(MYIOC_s_DEBUG_FMT "%s: kfree (fw_event=0x%p)\n",
-            ioc->name, __func__, fw_event));
-        list_del(&fw_event->list);
-        kfree(fw_event);
+        fw_event->users--;
+        if (!fw_event->users)
+                __mptsas_free_fw_event(ioc, fw_event);
         spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
 }
@@ -333,9 +342,10 @@ mptsas_free_fw_event(MPT_ADAPTER *ioc, struct fw_event_work *fw_event)
 static void
 mptsas_cleanup_fw_event_q(MPT_ADAPTER *ioc)
 {
-        struct fw_event_work *fw_event, *next;
+        struct fw_event_work *fw_event;
         struct mptsas_target_reset_event *target_reset_list, *n;
         MPT_SCSI_HOST *hd = shost_priv(ioc->sh);
+        unsigned long flags;

         /* flush the target_reset_list */
         if (!list_empty(&hd->target_reset_list)) {
@@ -350,14 +360,29 @@ mptsas_cleanup_fw_event_q(MPT_ADAPTER *ioc)
                 }
         }

-        if (list_empty(&ioc->fw_event_list) ||
-             !ioc->fw_event_q || in_interrupt())
+        if (list_empty(&ioc->fw_event_list) || !ioc->fw_event_q)
                 return;

-        list_for_each_entry_safe(fw_event, next, &ioc->fw_event_list, list) {
-                if (cancel_delayed_work(&fw_event->work))
-                        mptsas_free_fw_event(ioc, fw_event);
+        spin_lock_irqsave(&ioc->fw_event_lock, flags);
+
+        while (!list_empty(&ioc->fw_event_list)) {
+                bool canceled = false;
+
+                fw_event = list_first_entry(&ioc->fw_event_list,
+                                            struct fw_event_work, list);
+                fw_event->users++;
+                spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
+                if (cancel_delayed_work_sync(&fw_event->work))
+                        canceled = true;
+
+                spin_lock_irqsave(&ioc->fw_event_lock, flags);
+                if (canceled)
+                        fw_event->users--;
+                fw_event->users--;
+                WARN_ON_ONCE(fw_event->users);
+                __mptsas_free_fw_event(ioc, fw_event);
         }
+        spin_unlock_irqrestore(&ioc->fw_event_lock, flags);
 }
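For illustration only, the scheme used above can be sketched in isolation.
Everything below (struct item, put_item(), drain_items(), the single global
list and lock) is made-up scaffolding rather than mptsas code, and it assumes
the worker drops its reference exactly once and never requeues itself; the
point is simply why taking an extra reference before dropping the lock makes
cancel_delayed_work_sync() safe against a concurrently running worker.

#include <linux/list.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/workqueue.h>

struct item {
        struct list_head list;
        struct delayed_work work;
        int users;                      /* 1 from enqueue, +1 while being drained */
};

static LIST_HEAD(item_list);
static DEFINE_SPINLOCK(item_lock);

/* Caller holds item_lock. */
static void __free_item(struct item *it)
{
        list_del(&it->list);
        kfree(it);
}

static void put_item(struct item *it)
{
        unsigned long flags;

        spin_lock_irqsave(&item_lock, flags);
        if (!--it->users)
                __free_item(it);
        spin_unlock_irqrestore(&item_lock, flags);
}

static void item_work_fn(struct work_struct *work)
{
        struct item *it = container_of(to_delayed_work(work), struct item, work);

        /* ... do the actual event handling ... */
        put_item(it);                   /* drop the reference taken at enqueue time */
}

static void add_item(struct workqueue_struct *wq, struct item *it)
{
        unsigned long flags;

        spin_lock_irqsave(&item_lock, flags);
        it->users = 1;                  /* owned by the pending/running work */
        list_add_tail(&it->list, &item_list);
        INIT_DELAYED_WORK(&it->work, item_work_fn);
        queue_delayed_work(wq, &it->work, 0);
        spin_unlock_irqrestore(&item_lock, flags);
}

/* Must run in preemptible context: cancel_delayed_work_sync() may sleep. */
static void drain_items(void)
{
        unsigned long flags;

        spin_lock_irqsave(&item_lock, flags);
        while (!list_empty(&item_list)) {
                struct item *it = list_first_entry(&item_list, struct item, list);
                bool canceled;

                it->users++;            /* keep *it valid across the unlock */
                spin_unlock_irqrestore(&item_lock, flags);

                /*
                 * true: the pending work was canceled before it ever ran, so
                 * item_work_fn() never dropped the enqueue reference.
                 * false: the worker ran (this waits if it is still running)
                 * and already dropped its reference via put_item().
                 */
                canceled = cancel_delayed_work_sync(&it->work);

                spin_lock_irqsave(&item_lock, flags);
                if (canceled)
                        it->users--;    /* drop the never-consumed enqueue reference */
                if (!--it->users)       /* drop the drain reference; frees at zero */
                        __free_item(it);
        }
        spin_unlock_irqrestore(&item_lock, flags);
}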