author    Martin K. Petersen <martin.petersen@oracle.com>    2023-04-02 22:10:40 -0400
committer Martin K. Petersen <martin.petersen@oracle.com>    2023-04-02 22:10:40 -0400
commit    dc70c9615c067dbc34a1af736477f7d2b7f75319 (patch)
tree      da53c55cd22ac38728f417305cca1cd97c0c6da5
parent    60b3f355c7a3808a16dbef34dc61971a2b9bc80d (diff)
parent    f1437cd1e535c5d5cc9f6e5bfdfc9b1cd3141bc4 (diff)
Merge patch series "Fix shost command overloading issues"
John Garry <john.g.garry@oracle.com> says:

It's easy to get scsi_debug to error on throughput testing when we have
multiple shosts:

$ lsscsi
[7:0:0:0]    disk    Linux    scsi_debug    0191
[0:0:0:0]    disk    Linux    scsi_debug    0191

$ fio --filename=/dev/sda --filename=/dev/sdb --direct=1 --rw=read --bs=4k --iodepth=256 --runtime=60 --numjobs=40 --time_based --name=jpg --eta-newline=1 --readonly --ioengine=io_uring --hipri --exitall_on_error
jpg: (g=0): rw=read, bs=(R) 4096B-4096B, (W) 4096B-4096B, (T) 4096B-4096B, ioengine=io_uring, iodepth=256
...
fio-3.28
Starting 40 processes
[   27.521809] hrtimer: interrupt took 33067 ns
[   27.904660] sd 7:0:0:0: [sdb] tag#171 FAILED Result: hostbyte=DID_ABORT driverbyte=DRIVER_OK cmd_age=0s
[   27.904660] sd 0:0:0:0: [sda] tag#58 FAILED Result: hostbyte=DID_ABORT driverbyte=DRIVER_OK cmd_age=0s
fio: io_u error
[   27.904667] sd 0:0:0:0: [sda] tag#58 CDB: Read(10) 28 00 00 00 27 00 00 01 18 00 on file /dev/sda
[   27.904670] sd 0:0:0:0: [sda] tag#62 FAILED Result: hostbyte=DID_ABORT driverbyte=DRIVER_OK cmd_age=0s

The issue is related to how the driver manages submit queues and tags. A
single array of submit queues - sdebug_q_arr - with its own set of tags is
shared among all shosts. As such, when more than one shost is present it is
possible to overload the submit queues and run out of tags.

A separate issue is that the shost submit queue depth, sdebug_max_queue,
may be reduced dynamically, causing the shost to be overloaded. The number
of IOs which the shost may be sent is fixed at can_queue at init time,
which is also the initial value of sdebug_max_queue. So reducing
sdebug_max_queue means that the shost may be sent more IOs than it is
configured to handle, causing overloading.

This series removes the scsi_debug submit queue concept and uses
pre-existing APIs to manage and examine tags, such as scsi_block_requests()
and blk_mq_tagset_busy_iter(). Using standard APIs makes the driver more
maintainable and extensible in future.

A restriction is also added so that sdebug_max_queue may only be modified
when no shosts are present, i.e. we need to remove the shosts, modify
sdebug_max_queue, and then re-add the shosts.

Link: https://lore.kernel.org/r/20230327074310.1862889-1-john.g.garry@oracle.com
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
-rw-r--r--    drivers/scsi/scsi_debug.c    783
1 file changed, 361 insertions(+), 422 deletions(-)
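For context, a minimal sketch of the blk_mq_tagset_busy_iter() pattern the
series adopts (illustrative only, not part of the patch; the example_*
names are hypothetical): the block layer walks every in-flight request on a
Scsi_Host's tag set and runs a callback for each, which replaces scanning a
driver-private queue array such as sdebug_q_arr.

#include <linux/blk-mq.h>
#include <linux/printk.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

/* Called once per in-flight request; returning true continues the walk. */
static bool example_stop_one(struct request *rq, void *data)
{
	struct scsi_cmnd *cmnd = blk_mq_rq_to_pdu(rq);
	int *count = data;

	/*
	 * Per-command work goes here; the real patch cancels the command's
	 * deferred hrtimer/work item under a per-command lock.
	 */
	(void)cmnd;
	(*count)++;

	return true;
}

/* Visit all busy tags on this host's tag set. */
static void example_stop_all(struct Scsi_Host *shost)
{
	int count = 0;

	blk_mq_tagset_busy_iter(&shost->tag_set, example_stop_one, &count);
	pr_info("visited %d in-flight commands\n", count);
}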
diff --git a/drivers/scsi/scsi_debug.c b/drivers/scsi/scsi_debug.c
index 4500a5fdb92b..cf3f58e8f733 100644
--- a/drivers/scsi/scsi_debug.c
+++ b/drivers/scsi/scsi_debug.c
@@ -250,6 +250,11 @@ static const char *sdebug_version_date = "20210520";
#define SDEB_XA_NOT_IN_USE XA_MARK_1
+static struct kmem_cache *queued_cmd_cache;
+
+#define TO_QUEUED_CMD(scmd) ((void *)(scmd)->host_scribble)
+#define ASSIGN_QUEUED_CMD(scmnd, qc) { (scmnd)->host_scribble = (void *) qc; }
+
/* Zone types (zbcr05 table 25) */
enum sdebug_z_type {
ZBC_ZTYPE_CNV = 0x1,
@@ -336,13 +341,7 @@ struct sdebug_defer {
struct hrtimer hrt;
struct execute_work ew;
ktime_t cmpl_ts;/* time since boot to complete this cmd */
- int sqa_idx; /* index of sdebug_queue array */
- int qc_idx; /* index of sdebug_queued_cmd array within sqa_idx */
- int hc_idx; /* hostwide tag index */
int issuing_cpu;
- bool init_hrt;
- bool init_wq;
- bool init_poll;
bool aborted; /* true when blk_abort_request() already called */
enum sdeb_defer_type defer_t;
};
@@ -351,15 +350,12 @@ struct sdebug_queued_cmd {
/* corresponding bit set in in_use_bm[] in owning struct sdebug_queue
* instance indicates this slot is in use.
*/
- struct sdebug_defer *sd_dp;
- struct scsi_cmnd *a_cmnd;
+ struct sdebug_defer sd_dp;
+ struct scsi_cmnd *scmd;
};
-struct sdebug_queue {
- struct sdebug_queued_cmd qc_arr[SDEBUG_CANQUEUE];
- unsigned long in_use_bm[SDEBUG_CANQUEUE_WORDS];
- spinlock_t qc_lock;
- atomic_t blocked; /* to temporarily stop more being queued */
+struct sdebug_scsi_cmd {
+ spinlock_t lock;
};
static atomic_t sdebug_cmnd_count; /* number of incoming commands */
@@ -509,6 +505,8 @@ static int sdebug_add_store(void);
static void sdebug_erase_store(int idx, struct sdeb_store_info *sip);
static void sdebug_erase_all_stores(bool apart_from_first);
+static void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp);
+
/*
* The following are overflow arrays for cdbs that "hit" the same index in
* the opcode_info_arr array. The most time sensitive (or commonly used) cdb
@@ -756,7 +754,6 @@ static int sdebug_max_luns = DEF_MAX_LUNS;
static int sdebug_max_queue = SDEBUG_CANQUEUE; /* per submit queue */
static unsigned int sdebug_medium_error_start = OPT_MEDIUM_ERR_ADDR;
static int sdebug_medium_error_count = OPT_MEDIUM_ERR_NUM;
-static atomic_t retired_max_queue; /* if > 0 then was prior max_queue */
static int sdebug_ndelay = DEF_NDELAY; /* if > 0 then unit is nanoseconds */
static int sdebug_no_lun_0 = DEF_NO_LUN_0;
static int sdebug_no_uld;
@@ -816,7 +813,7 @@ static int sdebug_cylinders_per; /* cylinders per surface */
static int sdebug_sectors_per; /* sectors per cylinder */
static LIST_HEAD(sdebug_host_list);
-static DEFINE_SPINLOCK(sdebug_host_list_lock);
+static DEFINE_MUTEX(sdebug_host_list_mutex);
static struct xarray per_store_arr;
static struct xarray *per_store_ap = &per_store_arr;
@@ -843,7 +840,6 @@ static int sdeb_zbc_nr_conv = DEF_ZBC_NR_CONV_ZONES;
static int submit_queues = DEF_SUBMIT_QUEUES; /* > 1 for multi-queue (mq) */
static int poll_queues; /* iouring iopoll interface.*/
-static struct sdebug_queue *sdebug_q_arr; /* ptr to array of submit queues */
static DEFINE_RWLOCK(atomic_rw);
static DEFINE_RWLOCK(atomic_rw2);
@@ -908,7 +904,7 @@ static void sdebug_max_tgts_luns(void)
struct sdebug_host_info *sdbg_host;
struct Scsi_Host *hpnt;
- spin_lock(&sdebug_host_list_lock);
+ mutex_lock(&sdebug_host_list_mutex);
list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
hpnt = sdbg_host->shost;
if ((hpnt->this_id >= 0) &&
@@ -919,7 +915,7 @@ static void sdebug_max_tgts_luns(void)
/* sdebug_max_luns; */
hpnt->max_lun = SCSI_W_LUN_REPORT_LUNS + 1;
}
- spin_unlock(&sdebug_host_list_lock);
+ mutex_unlock(&sdebug_host_list_mutex);
}
enum sdeb_cmd_data {SDEB_IN_DATA = 0, SDEB_IN_CDB = 1};
@@ -1051,30 +1047,27 @@ static void all_config_cdb_len(void)
struct Scsi_Host *shost;
struct scsi_device *sdev;
- spin_lock(&sdebug_host_list_lock);
+ mutex_lock(&sdebug_host_list_mutex);
list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
shost = sdbg_host->shost;
shost_for_each_device(sdev, shost) {
config_cdb_len(sdev);
}
}
- spin_unlock(&sdebug_host_list_lock);
+ mutex_unlock(&sdebug_host_list_mutex);
}
static void clear_luns_changed_on_target(struct sdebug_dev_info *devip)
{
- struct sdebug_host_info *sdhp;
+ struct sdebug_host_info *sdhp = devip->sdbg_host;
struct sdebug_dev_info *dp;
- spin_lock(&sdebug_host_list_lock);
- list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
- list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
- if ((devip->sdbg_host == dp->sdbg_host) &&
- (devip->target == dp->target))
- clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
+ list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
+ if ((devip->sdbg_host == dp->sdbg_host) &&
+ (devip->target == dp->target)) {
+ clear_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
}
}
- spin_unlock(&sdebug_host_list_lock);
}
static int make_ua(struct scsi_cmnd *scp, struct sdebug_dev_info *devip)
@@ -4901,20 +4894,6 @@ fini:
return res;
}
-static struct sdebug_queue *get_queue(struct scsi_cmnd *cmnd)
-{
- u16 hwq;
- u32 tag = blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
-
- hwq = blk_mq_unique_tag_to_hwq(tag);
-
- pr_debug("tag=%#x, hwq=%d\n", tag, hwq);
- if (WARN_ON_ONCE(hwq >= submit_queues))
- hwq = 0;
-
- return sdebug_q_arr + hwq;
-}
-
static u32 get_tag(struct scsi_cmnd *cmnd)
{
return blk_mq_unique_tag(scsi_cmd_to_rq(cmnd));
@@ -4923,71 +4902,41 @@ static u32 get_tag(struct scsi_cmnd *cmnd)
/* Queued (deferred) command completions converge here. */
static void sdebug_q_cmd_complete(struct sdebug_defer *sd_dp)
{
- bool aborted = sd_dp->aborted;
- int qc_idx;
- int retiring = 0;
- unsigned long iflags;
- struct sdebug_queue *sqp;
- struct sdebug_queued_cmd *sqcp;
- struct scsi_cmnd *scp;
+ struct sdebug_queued_cmd *sqcp = container_of(sd_dp, struct sdebug_queued_cmd, sd_dp);
+ unsigned long flags;
+ struct scsi_cmnd *scp = sqcp->scmd;
+ struct sdebug_scsi_cmd *sdsc;
+ bool aborted;
- if (unlikely(aborted))
- sd_dp->aborted = false;
- qc_idx = sd_dp->qc_idx;
- sqp = sdebug_q_arr + sd_dp->sqa_idx;
if (sdebug_statistics) {
atomic_inc(&sdebug_completions);
if (raw_smp_processor_id() != sd_dp->issuing_cpu)
atomic_inc(&sdebug_miss_cpus);
}
- if (unlikely((qc_idx < 0) || (qc_idx >= SDEBUG_CANQUEUE))) {
- pr_err("wild qc_idx=%d\n", qc_idx);
- return;
- }
- spin_lock_irqsave(&sqp->qc_lock, iflags);
- WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
- sqcp = &sqp->qc_arr[qc_idx];
- scp = sqcp->a_cmnd;
- if (unlikely(scp == NULL)) {
- spin_unlock_irqrestore(&sqp->qc_lock, iflags);
- pr_err("scp is NULL, sqa_idx=%d, qc_idx=%d, hc_idx=%d\n",
- sd_dp->sqa_idx, qc_idx, sd_dp->hc_idx);
- return;
- }
-
- if (unlikely(atomic_read(&retired_max_queue) > 0))
- retiring = 1;
- sqcp->a_cmnd = NULL;
- if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
- spin_unlock_irqrestore(&sqp->qc_lock, iflags);
- pr_err("Unexpected completion\n");
- return;
+ if (!scp) {
+ pr_err("scmd=NULL\n");
+ goto out;
}
- if (unlikely(retiring)) { /* user has reduced max_queue */
- int k, retval;
+ sdsc = scsi_cmd_priv(scp);
+ spin_lock_irqsave(&sdsc->lock, flags);
+ aborted = sd_dp->aborted;
+ if (unlikely(aborted))
+ sd_dp->aborted = false;
+ ASSIGN_QUEUED_CMD(scp, NULL);
- retval = atomic_read(&retired_max_queue);
- if (qc_idx >= retval) {
- spin_unlock_irqrestore(&sqp->qc_lock, iflags);
- pr_err("index %d too large\n", retval);
- return;
- }
- k = find_last_bit(sqp->in_use_bm, retval);
- if ((k < sdebug_max_queue) || (k == retval))
- atomic_set(&retired_max_queue, 0);
- else
- atomic_set(&retired_max_queue, k + 1);
- }
- spin_unlock_irqrestore(&sqp->qc_lock, iflags);
- if (unlikely(aborted)) {
- if (sdebug_verbose)
- pr_info("bypassing scsi_done() due to aborted cmd, kicking-off EH\n");
+ spin_unlock_irqrestore(&sdsc->lock, flags);
+
+ if (aborted) {
+ pr_info("bypassing scsi_done() due to aborted cmd, kicking-off EH\n");
blk_abort_request(scsi_cmd_to_rq(scp));
- return;
+ goto out;
}
+
scsi_done(scp); /* callback to mid level */
+out:
+ sdebug_free_queued_cmd(sqcp);
}
/* When high resolution timer goes off this function is called. */
@@ -5237,115 +5186,103 @@ static void scsi_debug_slave_destroy(struct scsi_device *sdp)
}
}
-static void stop_qc_helper(struct sdebug_defer *sd_dp,
+/* Returns true if we require the queued memory to be freed by the caller. */
+static bool stop_qc_helper(struct sdebug_defer *sd_dp,
enum sdeb_defer_type defer_t)
{
- if (!sd_dp)
- return;
- if (defer_t == SDEB_DEFER_HRT)
- hrtimer_cancel(&sd_dp->hrt);
- else if (defer_t == SDEB_DEFER_WQ)
- cancel_work_sync(&sd_dp->ew.work);
-}
-
-/* If @cmnd found deletes its timer or work queue and returns true; else
- returns false */
-static bool stop_queued_cmnd(struct scsi_cmnd *cmnd)
-{
- unsigned long iflags;
- int j, k, qmax, r_qmax;
- enum sdeb_defer_type l_defer_t;
- struct sdebug_queue *sqp;
- struct sdebug_queued_cmd *sqcp;
- struct sdebug_defer *sd_dp;
+ if (defer_t == SDEB_DEFER_HRT) {
+ int res = hrtimer_try_to_cancel(&sd_dp->hrt);
- for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
- spin_lock_irqsave(&sqp->qc_lock, iflags);
- qmax = sdebug_max_queue;
- r_qmax = atomic_read(&retired_max_queue);
- if (r_qmax > qmax)
- qmax = r_qmax;
- for (k = 0; k < qmax; ++k) {
- if (test_bit(k, sqp->in_use_bm)) {
- sqcp = &sqp->qc_arr[k];
- if (cmnd != sqcp->a_cmnd)
- continue;
- /* found */
- sqcp->a_cmnd = NULL;
- sd_dp = sqcp->sd_dp;
- if (sd_dp) {
- l_defer_t = READ_ONCE(sd_dp->defer_t);
- WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
- } else
- l_defer_t = SDEB_DEFER_NONE;
- spin_unlock_irqrestore(&sqp->qc_lock, iflags);
- stop_qc_helper(sd_dp, l_defer_t);
- clear_bit(k, sqp->in_use_bm);
- return true;
- }
+ switch (res) {
+ case 0: /* Not active, it must have already run */
+ case -1: /* -1 It's executing the CB */
+ return false;
+ case 1: /* Was active, we've now cancelled */
+ default:
+ return true;
}
- spin_unlock_irqrestore(&sqp->qc_lock, iflags);
+ } else if (defer_t == SDEB_DEFER_WQ) {
+ /* Cancel if pending */
+ if (cancel_work_sync(&sd_dp->ew.work))
+ return true;
+ /* Was not pending, so it must have run */
+ return false;
+ } else if (defer_t == SDEB_DEFER_POLL) {
+ return true;
}
+
return false;
}
-/* Deletes (stops) timers or work queues of all queued commands */
-static void stop_all_queued(void)
+
+static bool scsi_debug_stop_cmnd(struct scsi_cmnd *cmnd)
{
- unsigned long iflags;
- int j, k;
enum sdeb_defer_type l_defer_t;
- struct sdebug_queue *sqp;
- struct sdebug_queued_cmd *sqcp;
struct sdebug_defer *sd_dp;
+ struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
+ struct sdebug_queued_cmd *sqcp = TO_QUEUED_CMD(cmnd);
- for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
- spin_lock_irqsave(&sqp->qc_lock, iflags);
- for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
- if (test_bit(k, sqp->in_use_bm)) {
- sqcp = &sqp->qc_arr[k];
- if (sqcp->a_cmnd == NULL)
- continue;
- sqcp->a_cmnd = NULL;
- sd_dp = sqcp->sd_dp;
- if (sd_dp) {
- l_defer_t = READ_ONCE(sd_dp->defer_t);
- WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
- } else
- l_defer_t = SDEB_DEFER_NONE;
- spin_unlock_irqrestore(&sqp->qc_lock, iflags);
- stop_qc_helper(sd_dp, l_defer_t);
- clear_bit(k, sqp->in_use_bm);
- spin_lock_irqsave(&sqp->qc_lock, iflags);
- }
- }
- spin_unlock_irqrestore(&sqp->qc_lock, iflags);
- }
+ lockdep_assert_held(&sdsc->lock);
+
+ if (!sqcp)
+ return false;
+ sd_dp = &sqcp->sd_dp;
+ l_defer_t = READ_ONCE(sd_dp->defer_t);
+ ASSIGN_QUEUED_CMD(cmnd, NULL);
+
+ if (stop_qc_helper(sd_dp, l_defer_t))
+ sdebug_free_queued_cmd(sqcp);
+
+ return true;
}
-/* Free queued command memory on heap */
-static void free_all_queued(void)
+/*
+ * Called from scsi_debug_abort() only, which is for timed-out cmd.
+ */
+static bool scsi_debug_abort_cmnd(struct scsi_cmnd *cmnd)
{
- int j, k;
- struct sdebug_queue *sqp;
- struct sdebug_queued_cmd *sqcp;
+ struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
+ unsigned long flags;
+ bool res;
- for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
- for (k = 0; k < SDEBUG_CANQUEUE; ++k) {
- sqcp = &sqp->qc_arr[k];
- kfree(sqcp->sd_dp);
- sqcp->sd_dp = NULL;
- }
+ spin_lock_irqsave(&sdsc->lock, flags);
+ res = scsi_debug_stop_cmnd(cmnd);
+ spin_unlock_irqrestore(&sdsc->lock, flags);
+
+ return res;
+}
+
+/*
+ * All we can do is set the cmnd as internally aborted and wait for it to
+ * finish. We cannot call scsi_done() as normal completion path may do that.
+ */
+static bool sdebug_stop_cmnd(struct request *rq, void *data)
+{
+ scsi_debug_abort_cmnd(blk_mq_rq_to_pdu(rq));
+
+ return true;
+}
+
+/* Deletes (stops) timers or work queues of all queued commands */
+static void stop_all_queued(void)
+{
+ struct sdebug_host_info *sdhp;
+
+ mutex_lock(&sdebug_host_list_mutex);
+ list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
+ struct Scsi_Host *shost = sdhp->shost;
+
+ blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_stop_cmnd, NULL);
}
+ mutex_unlock(&sdebug_host_list_mutex);
}
static int scsi_debug_abort(struct scsi_cmnd *SCpnt)
{
- bool ok;
+ bool ok = scsi_debug_abort_cmnd(SCpnt);
++num_aborts;
- ok = stop_queued_cmnd(SCpnt);
if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
sdev_printk(KERN_INFO, SCpnt->device,
"%s: command%s found\n", __func__,
@@ -5426,7 +5363,7 @@ static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
++num_host_resets;
if (SDEBUG_OPT_ALL_NOISE & sdebug_opts)
sdev_printk(KERN_INFO, SCpnt->device, "%s\n", __func__);
- spin_lock(&sdebug_host_list_lock);
+ mutex_lock(&sdebug_host_list_mutex);
list_for_each_entry(sdbg_host, &sdebug_host_list, host_list) {
list_for_each_entry(devip, &sdbg_host->dev_info_list,
dev_list) {
@@ -5434,7 +5371,7 @@ static int scsi_debug_host_reset(struct scsi_cmnd *SCpnt)
++k;
}
}
- spin_unlock(&sdebug_host_list_lock);
+ mutex_unlock(&sdebug_host_list_mutex);
stop_all_queued();
if (SDEBUG_OPT_RESET_NOISE & sdebug_opts)
sdev_printk(KERN_INFO, SCpnt->device,
@@ -5497,11 +5434,18 @@ static void sdebug_build_parts(unsigned char *ramp, unsigned long store_size)
static void block_unblock_all_queues(bool block)
{
- int j;
- struct sdebug_queue *sqp;
+ struct sdebug_host_info *sdhp;
- for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp)
- atomic_set(&sqp->blocked, (int)block);
+ lockdep_assert_held(&sdebug_host_list_mutex);
+
+ list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
+ struct Scsi_Host *shost = sdhp->shost;
+
+ if (block)
+ scsi_block_requests(shost);
+ else
+ scsi_unblock_requests(shost);
+ }
}
/* Adjust (by rounding down) the sdebug_cmnd_count so abs(every_nth)-1
@@ -5514,10 +5458,13 @@ static void tweak_cmnd_count(void)
modulo = abs(sdebug_every_nth);
if (modulo < 2)
return;
+
+ mutex_lock(&sdebug_host_list_mutex);
block_unblock_all_queues(true);
count = atomic_read(&sdebug_cmnd_count);
atomic_set(&sdebug_cmnd_count, (count / modulo) * modulo);
block_unblock_all_queues(false);
+ mutex_unlock(&sdebug_host_list_mutex);
}
static void clear_queue_stats(void)
@@ -5537,6 +5484,33 @@ static bool inject_on_this_cmd(void)
#define INCLUSIVE_TIMING_MAX_NS 1000000 /* 1 millisecond */
+
+void sdebug_free_queued_cmd(struct sdebug_queued_cmd *sqcp)
+{
+ if (sqcp)
+ kmem_cache_free(queued_cmd_cache, sqcp);
+}
+
+static struct sdebug_queued_cmd *sdebug_alloc_queued_cmd(struct scsi_cmnd *scmd)
+{
+ struct sdebug_queued_cmd *sqcp;
+ struct sdebug_defer *sd_dp;
+
+ sqcp = kmem_cache_zalloc(queued_cmd_cache, GFP_ATOMIC);
+ if (!sqcp)
+ return NULL;
+
+ sd_dp = &sqcp->sd_dp;
+
+ hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC, HRTIMER_MODE_REL_PINNED);
+ sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
+ INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
+
+ sqcp->scmd = scmd;
+
+ return sqcp;
+}
+
/* Complete the processing of the thread that queued a SCSI command to this
* driver. It either completes the command by calling cmnd_done() or
* schedules a hr timer or work queue then returns 0. Returns
@@ -5548,12 +5522,11 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
struct sdebug_dev_info *),
int delta_jiff, int ndelay)
{
- bool new_sd_dp;
- bool polled = scsi_cmd_to_rq(cmnd)->cmd_flags & REQ_POLLED;
- int k;
- unsigned long iflags;
+ struct request *rq = scsi_cmd_to_rq(cmnd);
+ bool polled = rq->cmd_flags & REQ_POLLED;
+ struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmnd);
+ unsigned long flags;
u64 ns_from_boot = 0;
- struct sdebug_queue *sqp;
struct sdebug_queued_cmd *sqcp;
struct scsi_device *sdp;
struct sdebug_defer *sd_dp;
@@ -5568,19 +5541,13 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
if (delta_jiff == 0)
goto respond_in_thread;
- sqp = get_queue(cmnd);
- spin_lock_irqsave(&sqp->qc_lock, iflags);
- if (unlikely(atomic_read(&sqp->blocked))) {
- spin_unlock_irqrestore(&sqp->qc_lock, iflags);
- return SCSI_MLQUEUE_HOST_BUSY;
- }
if (unlikely(sdebug_every_nth && (SDEBUG_OPT_RARE_TSF & sdebug_opts) &&
(scsi_result == 0))) {
int num_in_q = scsi_device_busy(sdp);
int qdepth = cmnd->device->queue_depth;
- if ((num_in_q == (qdepth - 1)) &&
+ if ((num_in_q == qdepth) &&
(atomic_inc_return(&sdebug_a_tsf) >=
abs(sdebug_every_nth))) {
atomic_set(&sdebug_a_tsf, 0);
@@ -5592,38 +5559,12 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
}
}
- k = find_first_zero_bit(sqp->in_use_bm, sdebug_max_queue);
- if (unlikely(k >= sdebug_max_queue)) {
- spin_unlock_irqrestore(&sqp->qc_lock, iflags);
- if (scsi_result)
- goto respond_in_thread;
- scsi_result = device_qfull_result;
- if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
- sdev_printk(KERN_INFO, sdp, "%s: max_queue=%d exceeded: TASK SET FULL\n",
- __func__, sdebug_max_queue);
- goto respond_in_thread;
- }
- set_bit(k, sqp->in_use_bm);
- sqcp = &sqp->qc_arr[k];
- sqcp->a_cmnd = cmnd;
- cmnd->host_scribble = (unsigned char *)sqcp;
- sd_dp = sqcp->sd_dp;
- spin_unlock_irqrestore(&sqp->qc_lock, iflags);
-
- if (!sd_dp) {
- sd_dp = kzalloc(sizeof(*sd_dp), GFP_ATOMIC);
- if (!sd_dp) {
- clear_bit(k, sqp->in_use_bm);
- return SCSI_MLQUEUE_HOST_BUSY;
- }
- new_sd_dp = true;
- } else {
- new_sd_dp = false;
+ sqcp = sdebug_alloc_queued_cmd(cmnd);
+ if (!sqcp) {
+ pr_err("%s no alloc\n", __func__);
+ return SCSI_MLQUEUE_HOST_BUSY;
}
-
- /* Set the hostwide tag */
- if (sdebug_host_max_queue)
- sd_dp->hc_idx = get_tag(cmnd);
+ sd_dp = &sqcp->sd_dp;
if (polled)
ns_from_boot = ktime_get_boottime_ns();
@@ -5670,13 +5611,8 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
u64 d = ktime_get_boottime_ns() - ns_from_boot;
if (kt <= d) { /* elapsed duration >= kt */
- spin_lock_irqsave(&sqp->qc_lock, iflags);
- sqcp->a_cmnd = NULL;
- clear_bit(k, sqp->in_use_bm);
- spin_unlock_irqrestore(&sqp->qc_lock, iflags);
- if (new_sd_dp)
- kfree(sd_dp);
/* call scsi_done() from this thread */
+ sdebug_free_queued_cmd(sqcp);
scsi_done(cmnd);
return 0;
}
@@ -5684,33 +5620,28 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
kt -= d;
}
}
+ if (sdebug_statistics)
+ sd_dp->issuing_cpu = raw_smp_processor_id();
if (polled) {
+ spin_lock_irqsave(&sdsc->lock, flags);
sd_dp->cmpl_ts = ktime_add(ns_to_ktime(ns_from_boot), kt);
- spin_lock_irqsave(&sqp->qc_lock, iflags);
- if (!sd_dp->init_poll) {
- sd_dp->init_poll = true;
- sqcp->sd_dp = sd_dp;
- sd_dp->sqa_idx = sqp - sdebug_q_arr;
- sd_dp->qc_idx = k;
- }
+ ASSIGN_QUEUED_CMD(cmnd, sqcp);
WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
- spin_unlock_irqrestore(&sqp->qc_lock, iflags);
+ spin_unlock_irqrestore(&sdsc->lock, flags);
} else {
- if (!sd_dp->init_hrt) {
- sd_dp->init_hrt = true;
- sqcp->sd_dp = sd_dp;
- hrtimer_init(&sd_dp->hrt, CLOCK_MONOTONIC,
- HRTIMER_MODE_REL_PINNED);
- sd_dp->hrt.function = sdebug_q_cmd_hrt_complete;
- sd_dp->sqa_idx = sqp - sdebug_q_arr;
- sd_dp->qc_idx = k;
- }
- WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_HRT);
/* schedule the invocation of scsi_done() for a later time */
+ spin_lock_irqsave(&sdsc->lock, flags);
+ ASSIGN_QUEUED_CMD(cmnd, sqcp);
+ WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_HRT);
hrtimer_start(&sd_dp->hrt, kt, HRTIMER_MODE_REL_PINNED);
+ /*
+ * The completion handler will try to grab sqcp->lock,
+ * so there is no chance that the completion handler
+ * will call scsi_done() until we release the lock
+ * here (so ok to keep referencing sdsc).
+ */
+ spin_unlock_irqrestore(&sdsc->lock, flags);
}
- if (sdebug_statistics)
- sd_dp->issuing_cpu = raw_smp_processor_id();
} else { /* jdelay < 0, use work queue */
if (unlikely((sdebug_opts & SDEBUG_OPT_CMD_ABORT) &&
atomic_read(&sdeb_inject_pending))) {
@@ -5720,30 +5651,21 @@ static int schedule_resp(struct scsi_cmnd *cmnd, struct sdebug_dev_info *devip,
blk_mq_unique_tag_to_tag(get_tag(cmnd)));
}
+ if (sdebug_statistics)
+ sd_dp->issuing_cpu = raw_smp_processor_id();
if (polled) {
+ spin_lock_irqsave(&sdsc->lock, flags);
+ ASSIGN_QUEUED_CMD(cmnd, sqcp);
sd_dp->cmpl_ts = ns_to_ktime(ns_from_boot);
- spin_lock_irqsave(&sqp->qc_lock, iflags);
- if (!sd_dp->init_poll) {
- sd_dp->init_poll = true;
- sqcp->sd_dp = sd_dp;
- sd_dp->sqa_idx = sqp - sdebug_q_arr;
- sd_dp->qc_idx = k;
- }
WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_POLL);
- spin_unlock_irqrestore(&sqp->qc_lock, iflags);
+ spin_unlock_irqrestore(&sdsc->lock, flags);
} else {
- if (!sd_dp->init_wq) {
- sd_dp->init_wq = true;
- sqcp->sd_dp = sd_dp;
- sd_dp->sqa_idx = sqp - sdebug_q_arr;
- sd_dp->qc_idx = k;
- INIT_WORK(&sd_dp->ew.work, sdebug_q_cmd_wq_complete);
- }
+ spin_lock_irqsave(&sdsc->lock, flags);
+ ASSIGN_QUEUED_CMD(cmnd, sqcp);
WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_WQ);
schedule_work(&sd_dp->ew.work);
+ spin_unlock_irqrestore(&sdsc->lock, flags);
}
- if (sdebug_statistics)
- sd_dp->issuing_cpu = raw_smp_processor_id();
}
return 0;
@@ -5946,14 +5868,39 @@ static int scsi_debug_write_info(struct Scsi_Host *host, char *buffer,
return length;
}
+struct sdebug_submit_queue_data {
+ int *first;
+ int *last;
+ int queue_num;
+};
+
+static bool sdebug_submit_queue_iter(struct request *rq, void *opaque)
+{
+ struct sdebug_submit_queue_data *data = opaque;
+ u32 unique_tag = blk_mq_unique_tag(rq);
+ u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
+ u16 tag = blk_mq_unique_tag_to_tag(unique_tag);
+ int queue_num = data->queue_num;
+
+ if (hwq != queue_num)
+ return true;
+
+ /* Rely on iter'ing in ascending tag order */
+ if (*data->first == -1)
+ *data->first = *data->last = tag;
+ else
+ *data->last = tag;
+
+ return true;
+}
+
/* Output seen with 'cat /proc/scsi/scsi_debug/<host_id>'. It will be the
* same for each scsi_debug host (if more than one). Some of the counters
* output are not atomics so might be inaccurate in a busy system. */
static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
{
- int f, j, l;
- struct sdebug_queue *sqp;
struct sdebug_host_info *sdhp;
+ int j;
seq_printf(m, "scsi_debug adapter driver, version %s [%s]\n",
SDEBUG_VERSION, sdebug_version_date);
@@ -5981,11 +5928,17 @@ static int scsi_debug_show_info(struct seq_file *m, struct Scsi_Host *host)
atomic_read(&sdeb_mq_poll_count));
seq_printf(m, "submit_queues=%d\n", submit_queues);
- for (j = 0, sqp = sdebug_q_arr; j < submit_queues; ++j, ++sqp) {
+ for (j = 0; j < submit_queues; ++j) {
+ int f = -1, l = -1;
+ struct sdebug_submit_queue_data data = {
+ .queue_num = j,
+ .first = &f,
+ .last = &l,
+ };
seq_printf(m, " queue %d:\n", j);
- f = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
- if (f != sdebug_max_queue) {
- l = find_last_bit(sqp->in_use_bm, sdebug_max_queue);
+ blk_mq_tagset_busy_iter(&host->tag_set, sdebug_submit_queue_iter,
+ &data);
+ if (f >= 0) {
seq_printf(m, " in_use_bm BUSY: %s: %d,%d\n",
"first,last bits", f, l);
}
@@ -6036,15 +5989,15 @@ static ssize_t delay_store(struct device_driver *ddp, const char *buf,
if (count > 0 && sscanf(buf, "%d", &jdelay) == 1) {
res = count;
if (sdebug_jdelay != jdelay) {
- int j, k;
- struct sdebug_queue *sqp;
+ struct sdebug_host_info *sdhp;
+ mutex_lock(&sdebug_host_list_mutex);
block_unblock_all_queues(true);
- for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
- ++j, ++sqp) {
- k = find_first_bit(sqp->in_use_bm,
- sdebug_max_queue);
- if (k != sdebug_max_queue) {
+
+ list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
+ struct Scsi_Host *shost = sdhp->shost;
+
+ if (scsi_host_busy(shost)) {
res = -EBUSY; /* queued commands */
break;
}
@@ -6054,6 +6007,7 @@ static ssize_t delay_store(struct device_driver *ddp, const char *buf,
sdebug_ndelay = 0;
}
block_unblock_all_queues(false);
+ mutex_unlock(&sdebug_host_list_mutex);
}
return res;
}
@@ -6076,25 +6030,27 @@ static ssize_t ndelay_store(struct device_driver *ddp, const char *buf,
(ndelay >= 0) && (ndelay < (1000 * 1000 * 1000))) {
res = count;
if (sdebug_ndelay != ndelay) {
- int j, k;
- struct sdebug_queue *sqp;
+ struct sdebug_host_info *sdhp;
+ mutex_lock(&sdebug_host_list_mutex);
block_unblock_all_queues(true);
- for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
- ++j, ++sqp) {
- k = find_first_bit(sqp->in_use_bm,
- sdebug_max_queue);
- if (k != sdebug_max_queue) {
+
+ list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
+ struct Scsi_Host *shost = sdhp->shost;
+
+ if (scsi_host_busy(shost)) {
res = -EBUSY; /* queued commands */
break;
}
}
+
if (res > 0) {
sdebug_ndelay = ndelay;
sdebug_jdelay = ndelay ? JDELAY_OVERRIDDEN
: DEF_JDELAY;
}
block_unblock_all_queues(false);
+ mutex_unlock(&sdebug_host_list_mutex);
}
return res;
}
@@ -6340,13 +6296,13 @@ static ssize_t lun_format_store(struct device_driver *ddp, const char *buf,
struct sdebug_host_info *sdhp;
struct sdebug_dev_info *dp;
- spin_lock(&sdebug_host_list_lock);
+ mutex_lock(&sdebug_host_list_mutex);
list_for_each_entry(sdhp, &sdebug_host_list, host_list) {
list_for_each_entry(dp, &sdhp->dev_info_list, dev_list) {
set_bit(SDEBUG_UA_LUNS_CHANGED, dp->uas_bm);
}
}
- spin_unlock(&sdebug_host_list_lock);
+ mutex_unlock(&sdebug_host_list_mutex);
}
return count;
}
@@ -6376,7 +6332,7 @@ static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
struct sdebug_host_info *sdhp;
struct sdebug_dev_info *dp;
- spin_lock(&sdebug_host_list_lock);
+ mutex_lock(&sdebug_host_list_mutex);
list_for_each_entry(sdhp, &sdebug_host_list,
host_list) {
list_for_each_entry(dp, &sdhp->dev_info_list,
@@ -6385,7 +6341,7 @@ static ssize_t max_luns_store(struct device_driver *ddp, const char *buf,
dp->uas_bm);
}
}
- spin_unlock(&sdebug_host_list_lock);
+ mutex_unlock(&sdebug_host_list_mutex);
}
return count;
}
@@ -6402,28 +6358,19 @@ static ssize_t max_queue_show(struct device_driver *ddp, char *buf)
static ssize_t max_queue_store(struct device_driver *ddp, const char *buf,
size_t count)
{
- int j, n, k, a;
- struct sdebug_queue *sqp;
+ int n;
if ((count > 0) && (1 == sscanf(buf, "%d", &n)) && (n > 0) &&
(n <= SDEBUG_CANQUEUE) &&
(sdebug_host_max_queue == 0)) {
- block_unblock_all_queues(true);
- k = 0;
- for (j = 0, sqp = sdebug_q_arr; j < submit_queues;
- ++j, ++sqp) {
- a = find_last_bit(sqp->in_use_bm, SDEBUG_CANQUEUE);
- if (a > k)
- k = a;
- }
- sdebug_max_queue = n;
- if (k == SDEBUG_CANQUEUE)
- atomic_set(&retired_max_queue, 0);
- else if (k >= n)
- atomic_set(&retired_max_queue, k + 1);
+ mutex_lock(&sdebug_host_list_mutex);
+
+ /* We may only change sdebug_max_queue when we have no shosts */
+ if (list_empty(&sdebug_host_list))
+ sdebug_max_queue = n;
else
- atomic_set(&retired_max_queue, 0);
- block_unblock_all_queues(false);
+ count = -EBUSY;
+ mutex_unlock(&sdebug_host_list_mutex);
return count;
}
return -EINVAL;
@@ -6492,7 +6439,7 @@ static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
struct sdebug_host_info *sdhp;
struct sdebug_dev_info *dp;
- spin_lock(&sdebug_host_list_lock);
+ mutex_lock(&sdebug_host_list_mutex);
list_for_each_entry(sdhp, &sdebug_host_list,
host_list) {
list_for_each_entry(dp, &sdhp->dev_info_list,
@@ -6501,7 +6448,7 @@ static ssize_t virtual_gb_store(struct device_driver *ddp, const char *buf,
dp->uas_bm);
}
}
- spin_unlock(&sdebug_host_list_lock);
+ mutex_unlock(&sdebug_host_list_mutex);
}
return count;
}
@@ -6851,7 +6798,6 @@ static int __init scsi_debug_init(void)
ramdisk_lck_a[0] = &atomic_rw;
ramdisk_lck_a[1] = &atomic_rw2;
- atomic_set(&retired_max_queue, 0);
if (sdebug_ndelay >= 1000 * 1000 * 1000) {
pr_warn("ndelay must be less than 1 second, ignored\n");
@@ -6947,13 +6893,6 @@ static int __init scsi_debug_init(void)
sdebug_max_queue);
}
- sdebug_q_arr = kcalloc(submit_queues, sizeof(struct sdebug_queue),
- GFP_KERNEL);
- if (sdebug_q_arr == NULL)
- return -ENOMEM;
- for (k = 0; k < submit_queues; ++k)
- spin_lock_init(&sdebug_q_arr[k].qc_lock);
-
/*
* check for host managed zoned block device specified with
* ptype=0x14 or zbc=XXX.
@@ -6962,10 +6901,8 @@ static int __init scsi_debug_init(void)
sdeb_zbc_model = BLK_ZONED_HM;
} else if (sdeb_zbc_model_s && *sdeb_zbc_model_s) {
k = sdeb_zbc_model_str(sdeb_zbc_model_s);
- if (k < 0) {
- ret = k;
- goto free_q_arr;
- }
+ if (k < 0)
+ return k;
sdeb_zbc_model = k;
switch (sdeb_zbc_model) {
case BLK_ZONED_NONE:
@@ -6977,8 +6914,7 @@ static int __init scsi_debug_init(void)
break;
default:
pr_err("Invalid ZBC model\n");
- ret = -EINVAL;
- goto free_q_arr;
+ return -EINVAL;
}
}
if (sdeb_zbc_model != BLK_ZONED_NONE) {
@@ -7025,17 +6961,14 @@ static int __init scsi_debug_init(void)
sdebug_unmap_granularity <=
sdebug_unmap_alignment) {
pr_err("ERR: unmap_granularity <= unmap_alignment\n");
- ret = -EINVAL;
- goto free_q_arr;
+ return -EINVAL;
}
}
xa_init_flags(per_store_ap, XA_FLAGS_ALLOC | XA_FLAGS_LOCK_IRQ);
if (want_store) {
idx = sdebug_add_store();
- if (idx < 0) {
- ret = idx;
- goto free_q_arr;
- }
+ if (idx < 0)
+ return idx;
}
pseudo_primary = root_device_register("pseudo_0");
@@ -7058,6 +6991,10 @@ static int __init scsi_debug_init(void)
hosts_to_add = sdebug_add_host;
sdebug_add_host = 0;
+ queued_cmd_cache = KMEM_CACHE(sdebug_queued_cmd, SLAB_HWCACHE_ALIGN);
+ if (!queued_cmd_cache)
+ goto driver_unreg;
+
for (k = 0; k < hosts_to_add; k++) {
if (want_store && k == 0) {
ret = sdebug_add_host_helper(idx);
@@ -7080,14 +7017,14 @@ static int __init scsi_debug_init(void)
return 0;
+driver_unreg:
+ driver_unregister(&sdebug_driverfs_driver);
bus_unreg:
bus_unregister(&pseudo_lld_bus);
dev_unreg:
root_device_unregister(pseudo_primary);
free_vm:
sdebug_erase_store(idx, NULL);
-free_q_arr:
- kfree(sdebug_q_arr);
return ret;
}
@@ -7095,17 +7032,15 @@ static void __exit scsi_debug_exit(void)
{
int k = sdebug_num_hosts;
- stop_all_queued();
for (; k; k--)
sdebug_do_remove_host(true);
- free_all_queued();
+ kmem_cache_destroy(queued_cmd_cache);
driver_unregister(&sdebug_driverfs_driver);
bus_unregister(&pseudo_lld_bus);
root_device_unregister(pseudo_primary);
sdebug_erase_all_stores(false);
xa_destroy(per_store_ap);
- kfree(sdebug_q_arr);
}
device_initcall(scsi_debug_init);
@@ -7261,9 +7196,9 @@ static int sdebug_add_host_helper(int per_host_idx)
goto clean;
}
- spin_lock(&sdebug_host_list_lock);
+ mutex_lock(&sdebug_host_list_mutex);
list_add_tail(&sdbg_host->host_list, &sdebug_host_list);
- spin_unlock(&sdebug_host_list_lock);
+ mutex_unlock(&sdebug_host_list_mutex);
sdbg_host->dev.bus = &pseudo_lld_bus;
sdbg_host->dev.parent = pseudo_primary;
@@ -7272,9 +7207,9 @@ static int sdebug_add_host_helper(int per_host_idx)
error = device_register(&sdbg_host->dev);
if (error) {
- spin_lock(&sdebug_host_list_lock);
+ mutex_lock(&sdebug_host_list_mutex);
list_del(&sdbg_host->host_list);
- spin_unlock(&sdebug_host_list_lock);
+ mutex_unlock(&sdebug_host_list_mutex);
goto clean;
}
@@ -7314,7 +7249,7 @@ static void sdebug_do_remove_host(bool the_end)
struct sdebug_host_info *sdbg_host = NULL;
struct sdebug_host_info *sdbg_host2;
- spin_lock(&sdebug_host_list_lock);
+ mutex_lock(&sdebug_host_list_mutex);
if (!list_empty(&sdebug_host_list)) {
sdbg_host = list_entry(sdebug_host_list.prev,
struct sdebug_host_info, host_list);
@@ -7339,7 +7274,7 @@ static void sdebug_do_remove_host(bool the_end)
}
if (sdbg_host)
list_del(&sdbg_host->host_list);
- spin_unlock(&sdebug_host_list_lock);
+ mutex_unlock(&sdebug_host_list_mutex);
if (!sdbg_host)
return;
@@ -7355,7 +7290,9 @@ static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
if (!devip)
return -ENODEV;
+ mutex_lock(&sdebug_host_list_mutex);
block_unblock_all_queues(true);
+
if (qdepth > SDEBUG_CANQUEUE) {
qdepth = SDEBUG_CANQUEUE;
pr_warn("%s: requested qdepth [%d] exceeds canqueue [%d], trim\n", __func__,
@@ -7366,9 +7303,12 @@ static int sdebug_change_qdepth(struct scsi_device *sdev, int qdepth)
if (qdepth != sdev->queue_depth)
scsi_change_queue_depth(sdev, qdepth);
+ block_unblock_all_queues(false);
+ mutex_unlock(&sdebug_host_list_mutex);
+
if (SDEBUG_OPT_Q_NOISE & sdebug_opts)
sdev_printk(KERN_INFO, sdev, "%s: qdepth=%d\n", __func__, qdepth);
- block_unblock_all_queues(false);
+
return sdev->queue_depth;
}
@@ -7458,95 +7398,82 @@ static void sdebug_map_queues(struct Scsi_Host *shost)
}
}
-static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
+struct sdebug_blk_mq_poll_data {
+ unsigned int queue_num;
+ int *num_entries;
+};
+
+/*
+ * We don't handle aborted commands here, but it does not seem possible to have
+ * aborted polled commands from schedule_resp()
+ */
+static bool sdebug_blk_mq_poll_iter(struct request *rq, void *opaque)
{
- bool first;
- bool retiring = false;
- int num_entries = 0;
- unsigned int qc_idx = 0;
- unsigned long iflags;
- ktime_t kt_from_boot = ktime_get_boottime();
- struct sdebug_queue *sqp;
- struct sdebug_queued_cmd *sqcp;
- struct scsi_cmnd *scp;
+ struct sdebug_blk_mq_poll_data *data = opaque;
+ struct scsi_cmnd *cmd = blk_mq_rq_to_pdu(rq);
+ struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
struct sdebug_defer *sd_dp;
+ u32 unique_tag = blk_mq_unique_tag(rq);
+ u16 hwq = blk_mq_unique_tag_to_hwq(unique_tag);
+ struct sdebug_queued_cmd *sqcp;
+ unsigned long flags;
+ int queue_num = data->queue_num;
+ ktime_t time;
- sqp = sdebug_q_arr + queue_num;
+ /* We're only interested in one queue for this iteration */
+ if (hwq != queue_num)
+ return true;
- spin_lock_irqsave(&sqp->qc_lock, iflags);
+ /* Subsequent checks would fail if this failed, but check anyway */
+ if (!test_bit(SCMD_STATE_INFLIGHT, &cmd->state))
+ return true;
- qc_idx = find_first_bit(sqp->in_use_bm, sdebug_max_queue);
- if (qc_idx >= sdebug_max_queue)
- goto unlock;
+ time = ktime_get_boottime();
- for (first = true; first || qc_idx + 1 < sdebug_max_queue; ) {
- if (first) {
- first = false;
- if (!test_bit(qc_idx, sqp->in_use_bm))
- continue;
- } else {
- qc_idx = find_next_bit(sqp->in_use_bm, sdebug_max_queue, qc_idx + 1);
- }
- if (qc_idx >= sdebug_max_queue)
- break;
+ spin_lock_irqsave(&sdsc->lock, flags);
+ sqcp = TO_QUEUED_CMD(cmd);
+ if (!sqcp) {
+ spin_unlock_irqrestore(&sdsc->lock, flags);
+ return true;
+ }
- sqcp = &sqp->qc_arr[qc_idx];
- sd_dp = sqcp->sd_dp;
- if (unlikely(!sd_dp))
- continue;
- scp = sqcp->a_cmnd;
- if (unlikely(scp == NULL)) {
- pr_err("scp is NULL, queue_num=%d, qc_idx=%u from %s\n",
- queue_num, qc_idx, __func__);
- break;
- }
- if (READ_ONCE(sd_dp->defer_t) == SDEB_DEFER_POLL) {
- if (kt_from_boot < sd_dp->cmpl_ts)
- continue;
+ sd_dp = &sqcp->sd_dp;
+ if (READ_ONCE(sd_dp->defer_t) != SDEB_DEFER_POLL) {
+ spin_unlock_irqrestore(&sdsc->lock, flags);
+ return true;
+ }
- } else /* ignoring non REQ_POLLED requests */
- continue;
- if (unlikely(atomic_read(&retired_max_queue) > 0))
- retiring = true;
+ if (time < sd_dp->cmpl_ts) {
+ spin_unlock_irqrestore(&sdsc->lock, flags);
+ return true;
+ }
- sqcp->a_cmnd = NULL;
- if (unlikely(!test_and_clear_bit(qc_idx, sqp->in_use_bm))) {
- pr_err("Unexpected completion sqp %p queue_num=%d qc_idx=%u from %s\n",
- sqp, queue_num, qc_idx, __func__);
- break;
- }
- if (unlikely(retiring)) { /* user has reduced max_queue */
- int k, retval;
+ ASSIGN_QUEUED_CMD(cmd, NULL);
+ spin_unlock_irqrestore(&sdsc->lock, flags);
- retval = atomic_read(&retired_max_queue);
- if (qc_idx >= retval) {
- pr_err("index %d too large\n", retval);
- break;
- }
- k = find_last_bit(sqp->in_use_bm, retval);
- if ((k < sdebug_max_queue) || (k == retval))
- atomic_set(&retired_max_queue, 0);
- else
- atomic_set(&retired_max_queue, k + 1);
- }
- WRITE_ONCE(sd_dp->defer_t, SDEB_DEFER_NONE);
- spin_unlock_irqrestore(&sqp->qc_lock, iflags);
+ if (sdebug_statistics) {
+ atomic_inc(&sdebug_completions);
+ if (raw_smp_processor_id() != sd_dp->issuing_cpu)
+ atomic_inc(&sdebug_miss_cpus);
+ }
- if (sdebug_statistics) {
- atomic_inc(&sdebug_completions);
- if (raw_smp_processor_id() != sd_dp->issuing_cpu)
- atomic_inc(&sdebug_miss_cpus);
- }
+ sdebug_free_queued_cmd(sqcp);
- scsi_done(scp); /* callback to mid level */
- num_entries++;
- spin_lock_irqsave(&sqp->qc_lock, iflags);
- if (find_first_bit(sqp->in_use_bm, sdebug_max_queue) >= sdebug_max_queue)
- break;
- }
+ scsi_done(cmd); /* callback to mid level */
+ (*data->num_entries)++;
+ return true;
+}
+
+static int sdebug_blk_mq_poll(struct Scsi_Host *shost, unsigned int queue_num)
+{
+ int num_entries = 0;
+ struct sdebug_blk_mq_poll_data data = {
+ .queue_num = queue_num,
+ .num_entries = &num_entries,
+ };
-unlock:
- spin_unlock_irqrestore(&sqp->qc_lock, iflags);
+ blk_mq_tagset_busy_iter(&shost->tag_set, sdebug_blk_mq_poll_iter,
+ &data);
if (num_entries > 0)
atomic_add(num_entries, &sdeb_mq_poll_count);
@@ -7720,6 +7647,16 @@ err_out:
return schedule_resp(scp, NULL, DID_NO_CONNECT << 16, NULL, 0, 0);
}
+static int sdebug_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
+{
+ struct sdebug_scsi_cmd *sdsc = scsi_cmd_priv(cmd);
+
+ spin_lock_init(&sdsc->lock);
+
+ return 0;
+}
+
+
static struct scsi_host_template sdebug_driver_template = {
.show_info = scsi_debug_show_info,
.write_info = scsi_debug_write_info,
@@ -7747,6 +7684,8 @@ static struct scsi_host_template sdebug_driver_template = {
.max_segment_size = -1U,
.module = THIS_MODULE,
.track_queue_depth = 1,
+ .cmd_size = sizeof(struct sdebug_scsi_cmd),
+ .init_cmd_priv = sdebug_init_cmd_priv,
};
static int sdebug_driver_probe(struct device *dev)
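For context, a minimal sketch of the per-command private data mechanism the
patch relies on above (illustrative only, not part of the patch; the
example_* names are hypothetical). Setting .cmd_size in the
scsi_host_template makes the SCSI midlayer allocate that many extra bytes
alongside each scsi_cmnd, .init_cmd_priv() runs once per command when the
host's tag set is created, and scsi_cmd_priv() returns a pointer to that
area on the I/O path; this is how the patch attaches a per-command spinlock
without keeping its own command array.

#include <linux/spinlock.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_host.h>

struct example_cmd_priv {
	spinlock_t lock;	/* serialises completion against abort */
};

static int example_init_cmd_priv(struct Scsi_Host *shost, struct scsi_cmnd *cmd)
{
	struct example_cmd_priv *priv = scsi_cmd_priv(cmd);

	spin_lock_init(&priv->lock);
	return 0;
}

static struct scsi_host_template example_template = {
	.name		= "example",
	.cmd_size	= sizeof(struct example_cmd_priv),
	.init_cmd_priv	= example_init_cmd_priv,
	/* .queuecommand and the remaining mandatory callbacks are elided. */
};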