-rw-r--r--  block/blk-core.c       8
-rw-r--r--  block/blk-throttle.c  49
-rw-r--r--  block/blk.h            6
3 files changed, 22 insertions, 41 deletions
diff --git a/block/blk-core.c b/block/blk-core.c
index 149149dd7f7b..6c491f2388e9 100644
--- a/block/blk-core.c
+++ b/block/blk-core.c
@@ -1515,12 +1515,8 @@ generic_make_request_checks(struct bio *bio)
 		goto end_io;
 	}
 
-	if (blk_throtl_bio(q, &bio))
-		goto end_io;
-
-	/* if bio = NULL, bio has been throttled and will be submitted later. */
-	if (!bio)
-		return false;
+	if (blk_throtl_bio(q, bio))
+		return false;	/* throttled, will be resubmitted later */
 
 	trace_block_bio_queue(q, bio);
 	return true;
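
The hunk above replaces the old convention, in which blk_throtl_bio() signalled
"throttled" by nulling the caller's bio pointer, with a direct boolean return.
A minimal userspace sketch of the two conventions (hypothetical names, not
kernel code):

#include <stdbool.h>
#include <stdio.h>

struct bio { int throttle_me; };

/* Old style: status in the return value, ownership transfer signalled
 * out-of-band by nulling *biop. Callers must re-check the pointer. */
static int throttle_old(struct bio **biop)
{
	if ((*biop)->throttle_me)
		*biop = NULL;
	return 0;
}

/* New style: "true" means the bio was consumed and will be resubmitted
 * later; the caller's pointer is never touched. */
static bool throttle_new(struct bio *bio)
{
	return bio->throttle_me;
}

int main(void)
{
	struct bio b = { .throttle_me = 1 };
	struct bio *bp = &b;

	throttle_old(&bp);
	if (!bp)
		printf("old API: throttled\n");
	if (throttle_new(&b))
		printf("new API: throttled\n");
	return 0;
}
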
diff --git a/block/blk-throttle.c b/block/blk-throttle.c
index ecba5fcef201..900a0c98745b 100644
--- a/block/blk-throttle.c
+++ b/block/blk-throttle.c
@@ -303,10 +303,6 @@ throtl_grp *throtl_find_tg(struct throtl_data *td, struct blkio_cgroup *blkcg)
 	return tg;
 }
 
-/*
- * This function returns with queue lock unlocked in case of error, like
- * request queue is no more
- */
 static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
 {
 	struct throtl_grp *tg = NULL, *__tg = NULL;
@@ -330,20 +326,16 @@ static struct throtl_grp * throtl_get_tg(struct throtl_data *td)
 	spin_unlock_irq(q->queue_lock);
 
 	tg = throtl_alloc_tg(td);
-	/*
-	 * We might have slept in group allocation. Make sure queue is not
-	 * dead
-	 */
-	if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
-		if (tg)
-			kfree(tg);
-
-		return ERR_PTR(-ENODEV);
-	}
 
 	/* Group allocated and queue is still alive. take the lock */
 	spin_lock_irq(q->queue_lock);
 
+	/* Make sure @q is still alive */
+	if (unlikely(test_bit(QUEUE_FLAG_DEAD, &q->queue_flags))) {
+		kfree(tg);
+		return NULL;
+	}
+
 	/*
 	 * Initialize the new group. After sleeping, read the blkcg again.
 	 */
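
The reordering above gives throtl_get_tg() a uniform locking contract: it now
always returns with the queue lock re-held, and reports a dead queue as plain
NULL rather than an ERR_PTR returned with the lock dropped. A self-contained
sketch of the same unlock/allocate/relock/revalidate pattern using pthreads
(hypothetical names; note that kfree(), like free(), is a no-op on NULL, which
is why the "if (tg)" guard could go away):

#include <pthread.h>
#include <stdbool.h>
#include <stdlib.h>

struct queue {
	pthread_mutex_t lock;
	bool dead;		/* guarded by lock */
};

struct group { int weight; };

/* Called with q->lock held; always returns with q->lock held again. */
static struct group *get_group(struct queue *q)
{
	struct group *g;

	pthread_mutex_unlock(&q->lock);	/* allocation may sleep */
	g = calloc(1, sizeof(*g));
	pthread_mutex_lock(&q->lock);	/* relock before revalidating */

	if (q->dead) {		/* queue died while we were unlocked */
		free(g);	/* free(NULL) is safe, no guard needed */
		return NULL;
	}
	return g;
}

int main(void)
{
	struct queue q = { .lock = PTHREAD_MUTEX_INITIALIZER, .dead = false };
	struct group *g;

	pthread_mutex_lock(&q.lock);
	g = get_group(&q);	/* lock is held on return either way */
	pthread_mutex_unlock(&q.lock);
	free(g);
	return 0;
}
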
@@ -1118,17 +1110,17 @@ static struct blkio_policy_type blkio_policy_throtl = {
 	.plid = BLKIO_POLICY_THROTL,
 };
 
-int blk_throtl_bio(struct request_queue *q, struct bio **biop)
+bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
 {
 	struct throtl_data *td = q->td;
 	struct throtl_grp *tg;
-	struct bio *bio = *biop;
 	bool rw = bio_data_dir(bio), update_disptime = true;
 	struct blkio_cgroup *blkcg;
+	bool throttled = false;
 
 	if (bio->bi_rw & REQ_THROTTLED) {
 		bio->bi_rw &= ~REQ_THROTTLED;
-		return 0;
+		goto out;
 	}
 
 	/*
@@ -1147,7 +1139,7 @@ int blk_throtl_bio(struct request_queue *q, struct bio **biop)
 			blkiocg_update_dispatch_stats(&tg->blkg, bio->bi_size,
 					rw, rw_is_sync(bio->bi_rw));
 			rcu_read_unlock();
-			return 0;
+			goto out;
 		}
 	}
 	rcu_read_unlock();
@@ -1156,18 +1148,10 @@ int blk_throtl_bio(struct request_queue *q, struct bio **biop)
 	 * Either group has not been allocated yet or it is not an unlimited
 	 * IO group
 	 */
-
 	spin_lock_irq(q->queue_lock);
 	tg = throtl_get_tg(td);
-
-	if (IS_ERR(tg)) {
-		if (PTR_ERR(tg) == -ENODEV) {
-			/*
-			 * Queue is gone. No queue lock held here.
-			 */
-			return -ENODEV;
-		}
-	}
+	if (unlikely(!tg))
+		goto out_unlock;
 
 	if (tg->nr_queued[rw]) {
 		/*
@@ -1195,7 +1179,7 @@ int blk_throtl_bio(struct request_queue *q, struct bio **biop)
 		 * So keep on trimming slice even if bio is not queued.
 		 */
 		throtl_trim_slice(td, tg, rw);
-		goto out;
+		goto out_unlock;
 	}
 
 queue_bio:
@@ -1207,16 +1191,17 @@ queue_bio:
 			tg->nr_queued[READ], tg->nr_queued[WRITE]);
 
 	throtl_add_bio_tg(q->td, tg, bio);
-	*biop = NULL;
+	throttled = true;
 
 	if (update_disptime) {
 		tg_update_disptime(td, tg);
 		throtl_schedule_next_dispatch(td);
 	}
 
-out:
+out_unlock:
 	spin_unlock_irq(q->queue_lock);
-	return 0;
+out:
+	return throttled;
 }
 
 int blk_throtl_init(struct request_queue *q)
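
With the rework above, every exit from blk_throtl_bio() funnels through one of
two labels: out_unlock for paths that still hold the queue lock, and out for
paths that never took it (or already dropped it), with the result carried in a
single "throttled" flag. A minimal sketch of that two-label idiom (hypothetical
names; a pthread mutex stands in for the queue lock):

#include <pthread.h>
#include <stdbool.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Returns true when the item was queued ("throttled"). */
static bool submit(int item)
{
	bool queued = false;

	if (item < 0)		/* fast path: lock never taken */
		goto out;

	pthread_mutex_lock(&lock);
	if (item == 0)		/* bail-out path: must unlock */
		goto out_unlock;

	queued = true;		/* normal path falls through and unlocks */
out_unlock:
	pthread_mutex_unlock(&lock);
out:
	return queued;
}

int main(void)
{
	return submit(1) ? 0 : 1;
}
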
diff --git a/block/blk.h b/block/blk.h
index 2b66dc21a493..c018dba4e335 100644
--- a/block/blk.h
+++ b/block/blk.h
@@ -190,13 +190,13 @@ static inline int blk_do_io_stat(struct request *rq)
 }
 
 #ifdef CONFIG_BLK_DEV_THROTTLING
-extern int blk_throtl_bio(struct request_queue *q, struct bio **bio);
+extern bool blk_throtl_bio(struct request_queue *q, struct bio *bio);
 extern int blk_throtl_init(struct request_queue *q);
 extern void blk_throtl_exit(struct request_queue *q);
 #else /* CONFIG_BLK_DEV_THROTTLING */
-static inline int blk_throtl_bio(struct request_queue *q, struct bio **bio)
+static inline bool blk_throtl_bio(struct request_queue *q, struct bio *bio)
 {
-	return 0;
+	return false;
 }
 static inline int blk_throtl_init(struct request_queue *q) { return 0; }
 static inline void blk_throtl_exit(struct request_queue *q) { }
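
The header change preserves the usual Kconfig stub pattern: with
CONFIG_BLK_DEV_THROTTLING disabled, the inline stub returns false, so callers
never see a throttled bio and need no #ifdef of their own. A generic sketch of
the idiom (hypothetical CONFIG_FEATURE and names, not kernel code):

#include <stdbool.h>

#ifdef CONFIG_FEATURE
extern bool feature_hook(int arg);	/* real version lives elsewhere */
#else /* CONFIG_FEATURE */
/* Compiled-out stub: "not handled", mirroring the throttling stub. */
static inline bool feature_hook(int arg)
{
	(void)arg;
	return false;
}
#endif /* CONFIG_FEATURE */

/* Call sites stay #ifdef-free either way. */
static inline bool submit_item(int arg)
{
	if (feature_hook(arg))
		return false;	/* consumed by the feature */
	return true;
}
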