summaryrefslogtreecommitdiffstats
path: root/drivers/md/md.c
diff options
context:
space:
mode:
authorGuoqing Jiang <guoqing.jiang@cloud.ionos.com>2019-12-23 10:49:01 +0100
committerSong Liu <songliubraving@fb.com>2020-01-13 11:44:10 -0800
commit025471f9f50fede6527c70336484becbcb2aff28 (patch)
tree9707bbe6e363c05cced1537261066cf3cf51fdb8 /drivers/md/md.c
parent69b00b5bb23552d43e8bbed73ef6624604bb94a2 (diff)
downloadlinux-025471f9f50fede6527c70336484becbcb2aff28.tar.gz
linux-025471f9f50fede6527c70336484becbcb2aff28.tar.bz2
linux-025471f9f50fede6527c70336484becbcb2aff28.zip
md/raid1: use bucket based mechanism for IO serialization
Since raid1 already uses a bucket-based mechanism to reduce conflicts between write IO and resync IO, it is possible to speed up IO serialization by referring to the same mechanism. To align with the barrier bucket mechanism, we create arrays (with the same number of elements, BARRIER_BUCKETS_NR) of spinlocks, rb trees and waitqueues. Then we can reduce lock contention with multiple spinlocks, boost search performance with multiple rb trees, and also reduce the thundering herd problem with multiple waitqueues. Signed-off-by: Guoqing Jiang <guoqing.jiang@cloud.ionos.com> Signed-off-by: Song Liu <songliubraving@fb.com>
Diffstat (limited to 'drivers/md/md.c')
-rw-r--r--drivers/md/md.c18
1 file changed, 13 insertions(+), 5 deletions(-)
diff --git a/drivers/md/md.c b/drivers/md/md.c
index 9c4e61c988ac..4824d50526fa 100644
--- a/drivers/md/md.c
+++ b/drivers/md/md.c
@@ -130,7 +130,7 @@ static void rdev_uninit_serial(struct md_rdev *rdev)
if (!test_and_clear_bit(CollisionCheck, &rdev->flags))
return;
- kfree(rdev->serial);
+ kvfree(rdev->serial);
rdev->serial = NULL;
}
@@ -144,18 +144,26 @@ static void rdevs_uninit_serial(struct mddev *mddev)
static int rdev_init_serial(struct md_rdev *rdev)
{
+ /* serial_nums equals with BARRIER_BUCKETS_NR */
+ int i, serial_nums = 1 << ((PAGE_SHIFT - ilog2(sizeof(atomic_t))));
struct serial_in_rdev *serial = NULL;
if (test_bit(CollisionCheck, &rdev->flags))
return 0;
- serial = kmalloc(sizeof(struct serial_in_rdev), GFP_KERNEL);
+ serial = kvmalloc(sizeof(struct serial_in_rdev) * serial_nums,
+ GFP_KERNEL);
if (!serial)
return -ENOMEM;
- spin_lock_init(&serial->serial_lock);
- serial->serial_rb = RB_ROOT_CACHED;
- init_waitqueue_head(&serial->serial_io_wait);
+ for (i = 0; i < serial_nums; i++) {
+ struct serial_in_rdev *serial_tmp = &serial[i];
+
+ spin_lock_init(&serial_tmp->serial_lock);
+ serial_tmp->serial_rb = RB_ROOT_CACHED;
+ init_waitqueue_head(&serial_tmp->serial_io_wait);
+ }
+
rdev->serial = serial;
set_bit(CollisionCheck, &rdev->flags);