path: root/include/linux/blkdev.h
author     Omar Sandoval <osandov@fb.com>  2018-05-09 02:08:51 -0700
committer  Jens Axboe <axboe@kernel.dk>    2018-05-09 08:33:06 -0600
commit     84c7afcebed913c93d50f116b046b7f0d8ec0cdc (patch)
tree       0c4b9bc41c6ef64b53ee51251ee3b957b2533d05 /include/linux/blkdev.h
parent     544ccc8dc904db55d4576c27a1eb66a888ffacea (diff)
block: use ktime_get_ns() instead of sched_clock() for cfq and bfq
cfq and bfq have some internal fields that use sched_clock() which can trivially use ktime_get_ns() instead. Their timestamp fields in struct request can also use ktime_get_ns(), which resolves the 8 year old comment added by commit 28f4197e5d47 ("block: disable preemption before using sched_clock()").

Signed-off-by: Omar Sandoval <osandov@fb.com>
Signed-off-by: Jens Axboe <axboe@kernel.dk>
Diffstat (limited to 'include/linux/blkdev.h')
-rw-r--r--  include/linux/blkdev.h | 21 ++++++---------------
1 file changed, 6 insertions(+), 15 deletions(-)
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index f2c2fc011e6b..9ef412666df1 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1799,42 +1799,33 @@ int kblockd_schedule_work_on(int cpu, struct work_struct *work);
 int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
 
 #ifdef CONFIG_BLK_CGROUP
-/*
- * This should not be using sched_clock(). A real patch is in progress
- * to fix this up, until that is in place we need to disable preemption
- * around sched_clock() in this function and set_io_start_time_ns().
- */
 static inline void set_start_time_ns(struct request *req)
 {
-	preempt_disable();
-	req->cgroup_start_time_ns = sched_clock();
-	preempt_enable();
+	req->cgroup_start_time_ns = ktime_get_ns();
 }
 
 static inline void set_io_start_time_ns(struct request *req)
 {
-	preempt_disable();
-	req->cgroup_io_start_time_ns = sched_clock();
-	preempt_enable();
+	req->cgroup_io_start_time_ns = ktime_get_ns();
 }
 
-static inline uint64_t rq_start_time_ns(struct request *req)
+static inline u64 rq_start_time_ns(struct request *req)
 {
 	return req->cgroup_start_time_ns;
 }
 
-static inline uint64_t rq_io_start_time_ns(struct request *req)
+static inline u64 rq_io_start_time_ns(struct request *req)
 {
 	return req->cgroup_io_start_time_ns;
 }
 #else
 static inline void set_start_time_ns(struct request *req) {}
 static inline void set_io_start_time_ns(struct request *req) {}
-static inline uint64_t rq_start_time_ns(struct request *req)
+static inline u64 rq_start_time_ns(struct request *req)
 {
 	return 0;
 }
-static inline uint64_t rq_io_start_time_ns(struct request *req)
+static inline u64 rq_io_start_time_ns(struct request *req)
 {
 	return 0;
 }
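
For illustration only (not part of the patch): a minimal sketch of how the patched helpers might be used by a hypothetical caller, assuming a kernel build context with CONFIG_BLK_CGROUP enabled. The function name example_account_request is made up for this sketch; the point is that with ktime_get_ns() the timestamps can be taken without the preempt_disable()/preempt_enable() dance the old sched_clock() version required.

#include <linux/blkdev.h>

/* Hypothetical example: stamp a request when it is queued and when it is
 * dispatched, then read the timestamps back to compute its queue wait time.
 */
static void example_account_request(struct request *req)
{
	u64 wait_ns;

	/* Taken when the request enters the block layer. */
	set_start_time_ns(req);

	/* ... request waits on the queue ... */

	/* Taken when the request is issued to the device. */
	set_io_start_time_ns(req);

	/* Nanoseconds spent waiting; both values now come from ktime_get_ns(). */
	wait_ns = rq_io_start_time_ns(req) - rq_start_time_ns(req);
	(void)wait_ns;	/* e.g. feed into cgroup I/O statistics */
}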