author     Mike Christie <mchristi@redhat.com>               2016-06-02 20:12:37 -0500
committer  Greg Kroah-Hartman <gregkh@linuxfoundation.org>   2016-08-20 18:11:03 +0200
commit     6bcdc94d697ea1ffe7497bd042feec29d3199c47 (patch)
tree       8cf5cfde8481944c26f8aa7c76c64d4023130735
parent     6816be83ff4637a6c6912fb720b578e0c9a10f10 (diff)
target: Fix max_unmap_lba_count calc overflow
commit ea263c7fada4af8ec7fe5fcfd6e7d7705a89351b upstream.

max_discard_sectors is only 32 bits, and some non-SCSI backend devices
will set it to the max 0xffffffff, so we can end up overflowing during
the max_unmap_lba_count calculation.

This fixes a regression caused by my patch:

    commit 8a9ebe717a133ba7bc90b06047f43cc6b8bcb8b3
    Author: Mike Christie <mchristi@redhat.com>
    Date:   Mon Jan 18 14:09:27 2016 -0600

        target: Fix WRITE_SAME/DISCARD conversion to linux 512b sectors

which can result in extra discards being sent due to the overflow
causing max_unmap_lba_count to be smaller than what the backing
device can actually support.

Signed-off-by: Mike Christie <mchristi@redhat.com>
Reviewed-by: Bart Van Assche <bart.vanassche@sandisk.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
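For illustration only (not part of the patch): a minimal userspace sketch of
the arithmetic, assuming max_discard_sectors = 0xffffffff and a 4096-byte
logical block size. The old formula's 32-bit left shift wraps before the
divide, while the shift-by-ilog2 form used in the patch divides by
(block_size / 512) without needing a wider type. ilog2_u32() below is a
hypothetical stand-in for the kernel's ilog2().

#include <stdio.h>

/*
 * Illustration only (not kernel code): why the old max_unmap_lba_count
 * calculation overflows when a backend reports the maximum 32-bit
 * max_discard_sectors, and why the shift-based form does not.
 */
static unsigned int ilog2_u32(unsigned int v)
{
	unsigned int r = 0;

	while (v >>= 1)
		r++;
	return r;
}

int main(void)
{
	unsigned int max_discard_sectors = 0xffffffff; /* 512-byte sectors */
	unsigned int block_size = 4096;                /* backend LBA size */

	/* Old formula: the 32-bit left shift wraps before the divide. */
	unsigned int old_count = (max_discard_sectors << 9) / block_size;

	/* New formula: divide by (block_size / 512) as a right shift. */
	unsigned int new_count = max_discard_sectors >> (ilog2_u32(block_size) - 9);

	printf("old formula: %u LBAs (overflowed)\n", old_count); /* 1048575 */
	printf("new formula: %u LBAs (correct)\n", new_count);    /* 536870911 */
	return 0;
}

Note that the patched helper also derives block_size itself via
queue_logical_block_size(q), so callers can no longer pass a mismatched size.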
-rw-r--r--   drivers/target/target_core_device.c    8
-rw-r--r--   drivers/target/target_core_file.c      3
-rw-r--r--   drivers/target/target_core_iblock.c    3
-rw-r--r--   include/target/target_core_backend.h   2
4 files changed, 8 insertions(+), 8 deletions(-)
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index a4046ca6e60d..6b423485c5d6 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -821,13 +821,15 @@ struct se_device *target_alloc_device(struct se_hba *hba, const char *name)
  * in ATA and we need to set TPE=1
  */
 bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
-				       struct request_queue *q, int block_size)
+				       struct request_queue *q)
 {
+	int block_size = queue_logical_block_size(q);
+
 	if (!blk_queue_discard(q))
 		return false;
 
-	attrib->max_unmap_lba_count = (q->limits.max_discard_sectors << 9) /
-		block_size;
+	attrib->max_unmap_lba_count =
+		q->limits.max_discard_sectors >> (ilog2(block_size) - 9);
 	/*
 	 * Currently hardcoded to 1 in Linux/SCSI code..
 	 */
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 75f0f08b2a34..79291869bce6 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -161,8 +161,7 @@ static int fd_configure_device(struct se_device *dev)
 			dev_size, div_u64(dev_size, fd_dev->fd_block_size),
 			fd_dev->fd_block_size);
 
-		if (target_configure_unmap_from_queue(&dev->dev_attrib, q,
-						      fd_dev->fd_block_size))
+		if (target_configure_unmap_from_queue(&dev->dev_attrib, q))
 			pr_debug("IFILE: BLOCK Discard support available,"
 				 " disabled by default\n");
 		/*
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 7c4efb4417b0..2077bc28640a 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -121,8 +121,7 @@ static int iblock_configure_device(struct se_device *dev)
 	dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
 	dev->dev_attrib.hw_queue_depth = q->nr_requests;
 
-	if (target_configure_unmap_from_queue(&dev->dev_attrib, q,
-					      dev->dev_attrib.hw_block_size))
+	if (target_configure_unmap_from_queue(&dev->dev_attrib, q))
 		pr_debug("IBLOCK: BLOCK Discard support available,"
 			" disabled by default\n");
 
diff --git a/include/target/target_core_backend.h b/include/target/target_core_backend.h
index d8ab5101fad5..f6f3bc52c1ac 100644
--- a/include/target/target_core_backend.h
+++ b/include/target/target_core_backend.h
@@ -95,6 +95,6 @@ sense_reason_t passthrough_parse_cdb(struct se_cmd *cmd,
 bool target_sense_desc_format(struct se_device *dev);
 sector_t target_to_linux_sector(struct se_device *dev, sector_t lb);
 bool target_configure_unmap_from_queue(struct se_dev_attrib *attrib,
-				       struct request_queue *q, int block_size);
+				       struct request_queue *q);
 
 #endif /* TARGET_CORE_BACKEND_H */