Diffstat (limited to 'block')
 block/blk-iocost.c |  5 ++---
 block/blk-pm.c     | 41 ++++++++++++++++++++++-------------------
 2 files changed, 24 insertions(+), 22 deletions(-)
diff --git a/block/blk-iocost.c b/block/blk-iocost.c
index 521c29b8ae29..413e0b5c8e6b 100644
--- a/block/blk-iocost.c
+++ b/block/blk-iocost.c
@@ -406,7 +406,7 @@ struct ioc {
 	enum ioc_running		running;
 	atomic64_t			vtime_rate;
-	seqcount_t			period_seqcount;
+	seqcount_spinlock_t		period_seqcount;
 	u32				period_at;	/* wallclock starttime */
 	u64				period_at_vtime; /* vtime starttime */
@@ -873,7 +873,6 @@ static void ioc_now(struct ioc *ioc, struct ioc_now *now)
 static void ioc_start_period(struct ioc *ioc, struct ioc_now *now)
 {
-	lockdep_assert_held(&ioc->lock);
 	WARN_ON_ONCE(ioc->running != IOC_RUNNING);
 	write_seqcount_begin(&ioc->period_seqcount);
@@ -2001,7 +2000,7 @@ static int blk_iocost_init(struct request_queue *q)
 	ioc->running = IOC_IDLE;
 	atomic64_set(&ioc->vtime_rate, VTIME_PER_USEC);
-	seqcount_init(&ioc->period_seqcount);
+	seqcount_spinlock_init(&ioc->period_seqcount, &ioc->lock);
 	ioc->period_at = ktime_to_us(ktime_get());
 	atomic64_set(&ioc->cur_period, 0);
 	atomic_set(&ioc->hweight_gen, 0);
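
For context on the seqcount change above, here is a minimal sketch of the seqcount_spinlock_t pattern these hunks switch to. The struct and function names below are hypothetical (not from blk-iocost.c); the point is that registering the spinlock at init time lets write_seqcount_begin() itself perform, under lockdep, the check that the deleted lockdep_assert_held() used to do, while readers stay lockless.

#include <linux/seqlock.h>
#include <linux/spinlock.h>
#include <linux/types.h>

/* Hypothetical example, not taken from blk-iocost.c. */
struct period_state {
	spinlock_t		lock;
	seqcount_spinlock_t	seq;		/* associated with ->lock */
	u64			period_start;
};

static void period_state_init(struct period_state *p)
{
	spin_lock_init(&p->lock);
	seqcount_spinlock_init(&p->seq, &p->lock);
	p->period_start = 0;
}

/* Writer side: the caller must hold p->lock; with lockdep enabled,
 * write_seqcount_begin() verifies that through the associated lock. */
static void period_state_advance(struct period_state *p, u64 now)
{
	write_seqcount_begin(&p->seq);
	p->period_start = now;
	write_seqcount_end(&p->seq);
}

/* Reader side: lockless; retry if a writer was active in between. */
static u64 period_state_read(struct period_state *p)
{
	unsigned int seq;
	u64 start;

	do {
		seq = read_seqcount_begin(&p->seq);
		start = p->period_start;
	} while (read_seqcount_retry(&p->seq, seq));

	return start;
}

In blk-iocost the corresponding lockless reader is ioc_now(), which samples period_at and period_at_vtime under the same seqcount, while writers such as ioc_start_period() run with ioc->lock held.
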
diff --git a/block/blk-pm.c b/block/blk-pm.c
index 1adc1cd748b4..b85234d758f7 100644
--- a/block/blk-pm.c
+++ b/block/blk-pm.c
@@ -164,9 +164,8 @@ EXPORT_SYMBOL(blk_pre_runtime_resume);
  *
  * Description:
  *    Update the queue's runtime status according to the return value of the
- *    device's runtime_resume function. If it is successfully resumed, process
- *    the requests that are queued into the device's queue when it is resuming
- *    and then mark last busy and initiate autosuspend for it.
+ *    device's runtime_resume function. If the resume was successful, call
+ *    blk_set_runtime_active() to do the real work of restarting the queue.
  *
  *    This function should be called near the end of the device's
  *    runtime_resume callback.
@@ -175,19 +174,13 @@ void blk_post_runtime_resume(struct request_queue *q, int err)
 {
 	if (!q->dev)
 		return;
-
-	spin_lock_irq(&q->queue_lock);
 	if (!err) {
-		q->rpm_status = RPM_ACTIVE;
-		pm_runtime_mark_last_busy(q->dev);
-		pm_request_autosuspend(q->dev);
+		blk_set_runtime_active(q);
 	} else {
+		spin_lock_irq(&q->queue_lock);
 		q->rpm_status = RPM_SUSPENDED;
+		spin_unlock_irq(&q->queue_lock);
 	}
-	spin_unlock_irq(&q->queue_lock);
-
-	if (!err)
-		blk_clear_pm_only(q);
 }
 EXPORT_SYMBOL(blk_post_runtime_resume);
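
As the updated kernel-doc above notes, blk_post_runtime_resume() is meant to be called near the end of the device's runtime_resume callback, paired with blk_pre_runtime_resume() at its start. A rough sketch of such a callback follows; struct mydev, mydev_power_up() and the drvdata layout are hypothetical and not taken from any in-tree driver.

#include <linux/blk-pm.h>
#include <linux/device.h>

struct mydev {					/* hypothetical driver-private data */
	struct request_queue	*queue;
	/* ... device state ... */
};

static int mydev_power_up(struct mydev *md);	/* hypothetical device-specific wakeup */

static int mydev_runtime_resume(struct device *dev)
{
	struct mydev *md = dev_get_drvdata(dev);
	int err;

	blk_pre_runtime_resume(md->queue);	/* queue moves to RPM_RESUMING */
	err = mydev_power_up(md);
	/* On success this now calls blk_set_runtime_active(); on failure the
	 * queue is marked RPM_SUSPENDED again. */
	blk_post_runtime_resume(md->queue, err);
	return err;
}
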
@@ -204,15 +197,25 @@ EXPORT_SYMBOL(blk_post_runtime_resume);
  * This function can be used in driver's resume hook to correct queue
  * runtime PM status and re-enable peeking requests from the queue. It
  * should be called before first request is added to the queue.
+ *
+ * This function is also called by blk_post_runtime_resume() for successful
+ * runtime resumes. It does everything necessary to restart the queue.
  */
 void blk_set_runtime_active(struct request_queue *q)
 {
-	if (q->dev) {
-		spin_lock_irq(&q->queue_lock);
-		q->rpm_status = RPM_ACTIVE;
-		pm_runtime_mark_last_busy(q->dev);
-		pm_request_autosuspend(q->dev);
-		spin_unlock_irq(&q->queue_lock);
-	}
+	int old_status;
+
+	if (!q->dev)
+		return;
+
+	spin_lock_irq(&q->queue_lock);
+	old_status = q->rpm_status;
+	q->rpm_status = RPM_ACTIVE;
+	pm_runtime_mark_last_busy(q->dev);
+	pm_request_autosuspend(q->dev);
+	spin_unlock_irq(&q->queue_lock);
+
+	if (old_status != RPM_ACTIVE)
+		blk_clear_pm_only(q);
 }
 EXPORT_SYMBOL(blk_set_runtime_active);
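
The old_status check above is what keeps blk_clear_pm_only() balanced against the blk_set_pm_only() done in blk_pre_runtime_suspend(): the pm_only count is only dropped when the queue was actually in a non-active runtime PM state. That makes blk_set_runtime_active() safe to call from a driver's system-resume hook even if the queue never runtime-suspended, as its kernel-doc suggests. Continuing the hypothetical driver sketched above:

static int mydev_resume(struct device *dev)
{
	struct mydev *md = dev_get_drvdata(dev);
	int err;

	err = mydev_power_up(md);
	if (err)
		return err;

	/* Restarts the queue; skips blk_clear_pm_only() if the queue was
	 * already RPM_ACTIVE, so the pm_only counter stays balanced. */
	blk_set_runtime_active(md->queue);
	return 0;
}
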