author    Jens Axboe <axboe@fb.com>  2014-11-17 10:43:05 -0700
committer Jens Axboe <axboe@fb.com>  2014-11-17 10:43:05 -0700
commit    8d76d1015d86f2b66c872fbcaf46072228d757a5 (patch)
tree      b9ee1be27323095734ee876c2b1481f0f2d44540
parent    e805b983d3fc852b80e30327e42c8c5f0c55c62c (diff)
parent    7c7f2f2bc9a63f9605a16eabac59fc655dfe7c9a (diff)
Merge branch 'for-3.19/core' into for-3.19/drivers
-rw-r--r--  Documentation/block/biodoc.txt |  6
-rw-r--r--  block/blk-mq.c                 | 39
-rw-r--r--  fs/fs-writeback.c              | 29
-rw-r--r--  include/linux/blk-mq.h         |  1
4 files changed, 53 insertions(+), 22 deletions(-)
diff --git a/Documentation/block/biodoc.txt b/Documentation/block/biodoc.txt
index 2101e718670d..f1323c6b7ed2 100644
--- a/Documentation/block/biodoc.txt
+++ b/Documentation/block/biodoc.txt
@@ -946,7 +946,11 @@ elevator_allow_merge_fn called whenever the block layer determines
request safely. The io scheduler may still
want to stop a merge at this point if it
results in some sort of conflict internally,
- this hook allows it to do that.
+ this hook allows it to do that. Note, however,
+ that two *requests* can still be merged at a later
+ time. Currently the io scheduler has no way to
+ prevent that. It can only learn about the fact
+ from the elevator_merge_req_fn callback.
elevator_dispatch_fn* fills the dispatch queue with ready requests.
I/O schedulers are free to postpone requests by
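The note added above concerns the legacy request_fn-era elevator interface. As a rough, illustrative sketch only (hypothetical scheduler code, assuming the elevator_allow_merge_fn and elevator_merge_req_fn signatures of that era), the two hooks mentioned look like this:

/* Hypothetical io scheduler hooks -- not part of this patch. */
#include <linux/blkdev.h>
#include <linux/elevator.h>

/* May veto merging @bio into @rq; return 0 to forbid, non-zero to allow. */
static int demo_allow_merge(struct request_queue *q, struct request *rq,
			    struct bio *bio)
{
	/* e.g. refuse merges that cross an internal scheduling domain */
	return 1;
}

/*
 * Called after @next has already been merged into @rq: this is only a
 * notification, the request-to-request merge can no longer be prevented.
 */
static void demo_merged_requests(struct request_queue *q, struct request *rq,
				 struct request *next)
{
	/* fold any per-request accounting for @next into @rq */
}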
diff --git a/block/blk-mq.c b/block/blk-mq.c
index b355b5957cd7..4347aa2be6ae 100644
--- a/block/blk-mq.c
+++ b/block/blk-mq.c
@@ -269,17 +269,25 @@ static void __blk_mq_free_request(struct blk_mq_hw_ctx *hctx,
blk_mq_queue_exit(q);
}
-void blk_mq_free_request(struct request *rq)
+void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *hctx, struct request *rq)
{
struct blk_mq_ctx *ctx = rq->mq_ctx;
- struct blk_mq_hw_ctx *hctx;
- struct request_queue *q = rq->q;
ctx->rq_completed[rq_is_sync(rq)]++;
-
- hctx = q->mq_ops->map_queue(q, ctx->cpu);
__blk_mq_free_request(hctx, ctx, rq);
+
+}
+EXPORT_SYMBOL_GPL(blk_mq_free_hctx_request);
+
+void blk_mq_free_request(struct request *rq)
+{
+ struct blk_mq_hw_ctx *hctx;
+ struct request_queue *q = rq->q;
+
+ hctx = q->mq_ops->map_queue(q, rq->mq_ctx->cpu);
+ blk_mq_free_hctx_request(hctx, rq);
}
+EXPORT_SYMBOL_GPL(blk_mq_free_request);
inline void __blk_mq_end_request(struct request *rq, int error)
{
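For context (hypothetical driver code, not part of this commit): the split lets a completion path that already holds its hardware queue free a request directly, without repeating the ->map_queue() lookup that blk_mq_free_request() performs.

/* Sketch of a driver completion path using the new export. */
static void demo_complete_and_free(struct blk_mq_hw_ctx *hctx,
				   struct request *rq)
{
	/* driver-specific completion bookkeeping would go here */

	/* frees (ctx, rq) directly; no q->mq_ops->map_queue() call needed */
	blk_mq_free_hctx_request(hctx, rq);
}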
@@ -801,9 +809,18 @@ void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async)
if (unlikely(test_bit(BLK_MQ_S_STOPPED, &hctx->state)))
return;
- if (!async && cpumask_test_cpu(smp_processor_id(), hctx->cpumask))
- __blk_mq_run_hw_queue(hctx);
- else if (hctx->queue->nr_hw_queues == 1)
+ if (!async) {
+ int cpu = get_cpu();
+ if (cpumask_test_cpu(cpu, hctx->cpumask)) {
+ __blk_mq_run_hw_queue(hctx);
+ put_cpu();
+ return;
+ }
+
+ put_cpu();
+ }
+
+ if (hctx->queue->nr_hw_queues == 1)
kblockd_schedule_delayed_work(&hctx->run_work, 0);
else {
unsigned int cpu;
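The hunk above replaces a bare smp_processor_id() test, which is unreliable in preemptible context, with a get_cpu()/put_cpu() pair, so the CPU cannot change between the cpumask test and the decision to run the queue inline. Reduced to its skeleton (an illustrative sketch, not code from this commit):

/* Illustrative only: the get_cpu()/put_cpu() pattern in isolation. */
static bool on_mapped_cpu(const struct cpumask *mask)
{
	int cpu = get_cpu();		/* current CPU, preemption now off */
	bool hit = cpumask_test_cpu(cpu, mask);

	put_cpu();			/* re-enable preemption */
	return hit;
}

In the patch itself __blk_mq_run_hw_queue() is called before put_cpu(), so the task stays pinned to the mapped CPU while the queue actually runs; this also lets the callers below drop their own preempt_disable()/preempt_enable() pairs.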
@@ -824,9 +841,7 @@ void blk_mq_run_queues(struct request_queue *q, bool async)
test_bit(BLK_MQ_S_STOPPED, &hctx->state))
continue;
- preempt_disable();
blk_mq_run_hw_queue(hctx, async);
- preempt_enable();
}
}
EXPORT_SYMBOL(blk_mq_run_queues);
@@ -853,9 +868,7 @@ void blk_mq_start_hw_queue(struct blk_mq_hw_ctx *hctx)
{
clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
- preempt_disable();
blk_mq_run_hw_queue(hctx, false);
- preempt_enable();
}
EXPORT_SYMBOL(blk_mq_start_hw_queue);
@@ -880,9 +893,7 @@ void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async)
continue;
clear_bit(BLK_MQ_S_STOPPED, &hctx->state);
- preempt_disable();
blk_mq_run_hw_queue(hctx, async);
- preempt_enable();
}
}
EXPORT_SYMBOL(blk_mq_start_stopped_hw_queues);
diff --git a/fs/fs-writeback.c b/fs/fs-writeback.c
index ef9bef118342..2d609a5fbfea 100644
--- a/fs/fs-writeback.c
+++ b/fs/fs-writeback.c
@@ -479,12 +479,28 @@ __writeback_single_inode(struct inode *inode, struct writeback_control *wbc)
* write_inode()
*/
spin_lock(&inode->i_lock);
- /* Clear I_DIRTY_PAGES if we've written out all dirty pages */
- if (!mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
- inode->i_state &= ~I_DIRTY_PAGES;
+
dirty = inode->i_state & I_DIRTY;
- inode->i_state &= ~(I_DIRTY_SYNC | I_DIRTY_DATASYNC);
+ inode->i_state &= ~I_DIRTY;
+
+ /*
+ * Paired with smp_mb() in __mark_inode_dirty(). This allows
+ * __mark_inode_dirty() to test i_state without grabbing i_lock -
+ * either they see the I_DIRTY bits cleared or we see the dirtied
+ * inode.
+ *
+ * I_DIRTY_PAGES is always cleared together above even if @mapping
+ * still has dirty pages. The flag is reinstated after smp_mb() if
+ * necessary. This guarantees that either __mark_inode_dirty()
+ * sees clear I_DIRTY_PAGES or we see PAGECACHE_TAG_DIRTY.
+ */
+ smp_mb();
+
+ if (mapping_tagged(mapping, PAGECACHE_TAG_DIRTY))
+ inode->i_state |= I_DIRTY_PAGES;
+
spin_unlock(&inode->i_lock);
+
/* Don't write the inode if only I_DIRTY_PAGES was set */
if (dirty & (I_DIRTY_SYNC | I_DIRTY_DATASYNC)) {
int err = write_inode(inode, wbc);
@@ -1148,12 +1164,11 @@ void __mark_inode_dirty(struct inode *inode, int flags)
}
/*
- * make sure that changes are seen by all cpus before we test i_state
- * -- mikulas
+ * Paired with smp_mb() in __writeback_single_inode() for the
+ * following lockless i_state test. See there for details.
*/
smp_mb();
- /* avoid the locking if we can */
if ((inode->i_state & flags) == flags)
return;
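To make the barrier pairing concrete, here is a simplified userspace model (C11 atomics standing in for i_state, PAGECACHE_TAG_DIRTY and smp_mb(); the names writeback_side/dirty_side are illustrative and this is not the kernel code):

#include <stdatomic.h>
#include <stdbool.h>

#define I_DIRTY 1			/* stand-in for the I_DIRTY bits */

static atomic_int  i_state;		/* stand-in for inode->i_state */
static atomic_bool tag_dirty;		/* stand-in for PAGECACHE_TAG_DIRTY */

/* Writeback side, mirroring __writeback_single_inode(). */
static void writeback_side(void)
{
	atomic_fetch_and(&i_state, ~I_DIRTY);		/* clear dirty bits */
	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() */
	if (atomic_load(&tag_dirty))			/* pages still dirty? */
		atomic_fetch_or(&i_state, I_DIRTY);	/* reinstate flag */
}

/* Dirtying side, mirroring __mark_inode_dirty(). */
static void dirty_side(void)
{
	atomic_store(&tag_dirty, true);			/* a page was dirtied */
	atomic_thread_fence(memory_order_seq_cst);	/* smp_mb() */
	if (atomic_load(&i_state) & I_DIRTY)		/* lockless test */
		return;					/* writeback sees it */
	/* otherwise: take the lock and mark the inode dirty for real */
}

The Dekker-style guarantee is that at least one side observes the other's write: either __mark_inode_dirty() sees the cleared I_DIRTY bits and redirties the inode, or writeback sees PAGECACHE_TAG_DIRTY and reinstates I_DIRTY_PAGES, so a page dirtied concurrently with writeback can never leave the inode looking clean.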
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index c3b64ec5321e..fb0a4fb3dc2b 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -169,6 +169,7 @@ void blk_mq_flush_plug_list(struct blk_plug *plug, bool from_schedule);
void blk_mq_insert_request(struct request *, bool, bool, bool);
void blk_mq_run_queues(struct request_queue *q, bool async);
void blk_mq_free_request(struct request *rq);
+void blk_mq_free_hctx_request(struct blk_mq_hw_ctx *, struct request *rq);
bool blk_mq_can_queue(struct blk_mq_hw_ctx *);
struct request *blk_mq_alloc_request(struct request_queue *q, int rw,
gfp_t gfp, bool reserved);