authorLars Ellenberg <lars.ellenberg@linbit.com>2010-05-14 19:08:55 +0200
committerPhilipp Reisner <philipp.reisner@linbit.com>2010-05-18 02:02:36 +0200
commitbb3d000cb99aa0924b78c1ae5f5943484527868a (patch)
tree276a13a6f1484ae255725d0faab55ff071b68a98 /drivers/block
parent45bb912bd5ea4d2b3a270a93cbdf767a0e2df6f5 (diff)
drbd: allow resync requests to be larger than max_segment_size
This should allow for better background resync performance.

Signed-off-by: Philipp Reisner <philipp.reisner@linbit.com>
Signed-off-by: Lars Ellenberg <lars.ellenberg@linbit.com>
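For orientation, a minimal sketch (not the driver source) of the version gate this commit introduces: peers speaking protocol 94 or newer (drbd 8.3.8+) can handle multi-bio epoch entries, so resync requests no longer need to respect the local queue's max_segment_size. The constants and helper name below are assumptions chosen for the example, not the actual drbd definitions.

/* Illustrative sketch only; mirrors the logic added to drbd_worker.c below.
 * The constants and the helper name are assumptions for this example. */
#include <stdio.h>

#define EXAMPLE_QUEUE_MAX_SEGMENT_SIZE  (4096u)       /* e.g. PAGE_SIZE on lvm/md stacks */
#define EXAMPLE_DRBD_MAX_SEGMENT_SIZE   (32u * 1024)  /* assumed larger upper bound */

/* Pick the resync request size limit from the agreed protocol version:
 * peers older than protocol 94 cannot split a request into multiple bios,
 * so the local queue limit must still be respected for them. */
static unsigned int resync_max_segment_size(int agreed_pro_version)
{
	return agreed_pro_version < 94 ?
		EXAMPLE_QUEUE_MAX_SEGMENT_SIZE : EXAMPLE_DRBD_MAX_SEGMENT_SIZE;
}

int main(void)
{
	printf("protocol 91 -> %u bytes\n", resync_max_segment_size(91));
	printf("protocol 94 -> %u bytes\n", resync_max_segment_size(94));
	return 0;
}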
Diffstat (limited to 'drivers/block')
-rw-r--r-- drivers/block/drbd/drbd_worker.c | 13
1 file changed, 6 insertions(+), 7 deletions(-)
diff --git a/drivers/block/drbd/drbd_worker.c b/drivers/block/drbd/drbd_worker.c
index d771b1e0424b..91085c1ab52f 100644
--- a/drivers/block/drbd/drbd_worker.c
+++ b/drivers/block/drbd/drbd_worker.c
@@ -462,7 +462,7 @@ int w_make_resync_request(struct drbd_conf *mdev,
 	unsigned long bit;
 	sector_t sector;
 	const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
-	int max_segment_size = queue_max_segment_size(mdev->rq_queue);
+	int max_segment_size;
 	int number, i, size, pe, mx;
 	int align, queued, sndbuf;
 
@@ -488,6 +488,11 @@ int w_make_resync_request(struct drbd_conf *mdev,
 		return 1;
 	}
 
+	/* starting with drbd 8.3.8, we can handle multi-bio EEs,
+	 * if it should be necessary */
+	max_segment_size = mdev->agreed_pro_version < 94 ?
+		queue_max_segment_size(mdev->rq_queue) : DRBD_MAX_SEGMENT_SIZE;
+
 	mdev->c_sync_rate = calc_resync_rate(mdev);
 	number = SLEEP_TIME * mdev->c_sync_rate / ((BM_BLOCK_SIZE / 1024) * HZ);
 	pe = atomic_read(&mdev->rs_pending_cnt);
@@ -552,12 +557,6 @@ next_sector:
 		 *
 		 * Additionally always align bigger requests, in order to
 		 * be prepared for all stripe sizes of software RAIDs.
-		 *
-		 * we _do_ care about the agreed-upon q->max_segment_size
-		 * here, as splitting up the requests on the other side is more
-		 * difficult. the consequence is, that on lvm and md and other
-		 * "indirect" devices, this is dead code, since
-		 * q->max_segment_size will be PAGE_SIZE.
 		 */
 		align = 1;
 		for (;;) {
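The hunk above is truncated before the body of the merge loop. As a rough, self-contained illustration of the idea (assumptions, not the drbd source): adjacent dirty 4 KiB bitmap blocks are merged into one resync request as long as the total stays within the limit chosen above, so a larger max_segment_size directly yields larger requests. The real loop additionally enforces power-of-two alignment and extent boundaries, which this sketch omits; the dirty[] array and function name here are made up for the example.

/* Simplified illustration only -- not the drbd code. Starting from one 4 KiB
 * bitmap block, keep merging the next adjacent dirty block into the request
 * while the total stays within max_segment_size. */
#include <stdio.h>

#define BM_BLOCK_SIZE 4096u   /* one resync bitmap block, 4 KiB */

static unsigned int merge_adjacent_blocks(const int *dirty, int nblocks,
					   int first, unsigned int max_segment_size)
{
	unsigned int size = BM_BLOCK_SIZE;
	int bit = first;

	for (;;) {
		/* stop once adding another block would exceed the limit */
		if (size + BM_BLOCK_SIZE > max_segment_size)
			break;
		/* stop at the end of the bitmap or at the first clean block */
		if (bit + 1 >= nblocks || !dirty[bit + 1])
			break;
		bit++;
		size += BM_BLOCK_SIZE;
	}
	return size;
}

int main(void)
{
	const int dirty[] = { 1, 1, 1, 1, 1, 0, 1, 1 };

	/* old behaviour: capped at a PAGE_SIZE-like queue limit (4 KiB) */
	printf("capped:   %u bytes\n", merge_adjacent_blocks(dirty, 8, 0, 4096u));
	/* new behaviour (protocol >= 94): a larger cap allows bigger requests */
	printf("larger:   %u bytes\n", merge_adjacent_blocks(dirty, 8, 0, 32768u));
	return 0;
}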