author     Markus Stockhausen <stockhausen@collogia.de>  2014-12-15 12:57:05 +1100
committer  NeilBrown <neilb@suse.de>                     2015-04-22 08:00:42 +1000
commit     d06f191f8ecaef4d524e765fdb455f96392fbd42 (patch)
tree       3e83449b9fb3c910a7a6d65a776304dd158e18c4 /drivers/md
parent     584acdd49cd2472ca0f5a06adbe979db82d0b4af (diff)
md/raid5: introduce configuration option rmw_level
Depending on the available parity coding we allow optimized rmw logic for write
operations. To support easier testing this patch allows manual control of the
rmw/rcw decision through the interface /sys/block/mdX/md/rmw_level.

The configuration can handle three levels of control.

rmw_level=0: Disable rmw for all RAID types. Hardware assisted P/Q calculation
has no implementation path yet to factor in/out chunks of a syndrome. Enforcing
this level can be beneficial for slow CPUs with hardware syndrome support and
fast SSDs.

rmw_level=1: Estimate rmw IOs and rcw IOs. Execute rmw only if we will save
IOs. This equals the "old" unpatched behaviour and will be the default.

rmw_level=2: Execute rmw even if calculated IOs for rmw and rcw are equal. We
might have higher CPU consumption because of calculating the parity twice but
it can be beneficial otherwise. E.g. RAID4 with fast dedicated parity disk/SSD.

The option is implemented just to be forward-looking and will ONLY work with
this patch!

Signed-off-by: Markus Stockhausen <stockhausen@collogia.de>
Signed-off-by: NeilBrown <neilb@suse.de>
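As a usage illustration (not part of the patch): a minimal userspace sketch
that selects rmw_level=2 through the new sysfs attribute. The array name md0
and the chosen value are assumptions for the example only.

/* Illustrative only: select rmw_level=2 (PARITY_PREFER_RMW) on a
 * hypothetical array md0 via the interface added by this patch. */
#include <stdio.h>

int main(void)
{
	const char *path = "/sys/block/md0/md/rmw_level";
	FILE *f = fopen(path, "w");

	if (!f) {
		perror("fopen");
		return 1;
	}
	fprintf(f, "2\n");	/* 0, 1 or 2, per the levels described above */
	fclose(f);
	return 0;
}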
Diffstat (limited to 'drivers/md')
-rw-r--r--  drivers/md/raid5.c  44
-rw-r--r--  drivers/md/raid5.h   1
2 files changed, 45 insertions(+), 0 deletions(-)
diff --git a/drivers/md/raid5.c b/drivers/md/raid5.c
index c82ce1fd8723..f78b1964543b 100644
--- a/drivers/md/raid5.c
+++ b/drivers/md/raid5.c
@@ -5880,6 +5880,49 @@ raid5_stripecache_size = __ATTR(stripe_cache_size, S_IRUGO | S_IWUSR,
raid5_store_stripe_cache_size);
static ssize_t
+raid5_show_rmw_level(struct mddev *mddev, char *page)
+{
+	struct r5conf *conf = mddev->private;
+	if (conf)
+		return sprintf(page, "%d\n", conf->rmw_level);
+	else
+		return 0;
+}
+
+static ssize_t
+raid5_store_rmw_level(struct mddev *mddev, const char *page, size_t len)
+{
+	struct r5conf *conf = mddev->private;
+	unsigned long new;
+
+	if (!conf)
+		return -ENODEV;
+
+	if (len >= PAGE_SIZE)
+		return -EINVAL;
+
+	if (kstrtoul(page, 10, &new))
+		return -EINVAL;
+
+	if (new != PARITY_DISABLE_RMW && !raid6_call.xor_syndrome)
+		return -EINVAL;
+
+	if (new != PARITY_DISABLE_RMW &&
+	    new != PARITY_ENABLE_RMW &&
+	    new != PARITY_PREFER_RMW)
+		return -EINVAL;
+
+	conf->rmw_level = new;
+	return len;
+}
+
+static struct md_sysfs_entry
+raid5_rmw_level = __ATTR(rmw_level, S_IRUGO | S_IWUSR,
+			 raid5_show_rmw_level,
+			 raid5_store_rmw_level);
+
+
+static ssize_t
raid5_show_preread_threshold(struct mddev *mddev, char *page)
{
struct r5conf *conf;
@@ -6065,6 +6108,7 @@ static struct attribute *raid5_attrs[] = {
&raid5_preread_bypass_threshold.attr,
&raid5_group_thread_cnt.attr,
&raid5_skip_copy.attr,
+ &raid5_rmw_level.attr,
NULL,
};
static struct attribute_group raid5_attrs_group = {
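For orientation between the two file diffs: the three levels from the commit
message map onto the rmw/rcw choice roughly as in the sketch below. This is
illustrative only, not the handle_stripe_dirtying() logic changed elsewhere in
this series; prefer_rmw(), rmw_ios and rcw_ios are made-up names.

/* Hedged sketch of the decision the rmw_level knob is meant to steer;
 * simplified, not taken from this patch. */
static int prefer_rmw(struct r5conf *conf, int rmw_ios, int rcw_ios)
{
	if (conf->rmw_level == PARITY_DISABLE_RMW)
		return 0;			/* level 0: never rmw */
	if (rmw_ios < rcw_ios)
		return 1;			/* level 1: rmw only when it saves IOs */
	if (rmw_ios == rcw_ios && conf->rmw_level == PARITY_PREFER_RMW)
		return 1;			/* level 2: prefer rmw on a tie */
	return 0;
}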
diff --git a/drivers/md/raid5.h b/drivers/md/raid5.h
index 57fef9ba36fa..6614ac5ffc0e 100644
--- a/drivers/md/raid5.h
+++ b/drivers/md/raid5.h
@@ -362,6 +362,7 @@ enum {
enum {
	PARITY_DISABLE_RMW = 0,
	PARITY_ENABLE_RMW,
+	PARITY_PREFER_RMW,
};
/*