author     Mike Christie <mchristi@redhat.com>        2017-11-28 12:40:29 -0600
committer  Nicholas Bellinger <nab@linux-iscsi.org>   2018-01-12 15:07:11 -0800
commit     89ec9cfd3b644fbc36047e36776509130d2fc1ec (patch)
tree       deab530efef0477efc8010d9bc032cc2295ddcf9 /drivers/target
parent     bf99ec13327bb5b0f6475aea8735c0ca34cc2a26 (diff)
tcmu: split unmap_thread_fn
Separate unmap_thread_fn to make it easier to read.

Note: this patch does not fix the bug where we might miss a wake up
call. The next patch will fix that. This patch only separates the code
into functions.

Signed-off-by: Mike Christie <mchristi@redhat.com>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
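
The wake-up issue the note refers to is the classic lost-wakeup race:
unmap_thread_fn only notices a wake_up(&unmap_wait) that arrives while it
is sitting between prepare_to_wait() and finish_wait(), so a wake-up issued
while the thread is busy processing can be dropped and the thread may go
back to sleep with work still pending. Below is a rough userspace analogue
of the same pattern using plain pthreads rather than kernel wait queues;
the names (unmap_thread, kick_unmap_thread, work_pending) and structure are
illustrative only and are not part of the patch.

#include <pthread.h>
#include <stdbool.h>
#include <stdio.h>
#include <unistd.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
static bool work_pending;   /* stands in for "a udev is waiting for blocks" */

/* Consumer loop, shaped like unmap_thread_fn. */
static void *unmap_thread(void *arg)
{
	(void)arg;
	for (;;) {
		pthread_mutex_lock(&lock);
		/*
		 * Re-checking work_pending under the lock is what keeps this
		 * version from sleeping forever: a signal sent while the
		 * thread was busy below is not remembered by the condition
		 * variable, but the flag still is.  The kernel thread, both
		 * before and after this split, sleeps on unmap_wait without
		 * such a check, so a wake_up() that fires before it calls
		 * prepare_to_wait() again is simply lost; that is the missed
		 * wake-up the commit message defers to the next patch.
		 */
		while (!work_pending)
			pthread_cond_wait(&cond, &lock);
		work_pending = false;
		pthread_mutex_unlock(&lock);

		/* corresponds to find_free_blocks() + run_cmdr_queues() */
		puts("processing queued work");
	}
	return NULL;
}

/* Producer, corresponding to the callers of wake_up(&unmap_wait). */
static void kick_unmap_thread(void)
{
	pthread_mutex_lock(&lock);
	work_pending = true;
	pthread_cond_signal(&cond);
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	pthread_t t;

	pthread_create(&t, NULL, unmap_thread, NULL);
	kick_unmap_thread();
	sleep(1);		/* let the worker run once, then exit */
	return 0;
}
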
Diffstat (limited to 'drivers/target')
-rw-r--r--  drivers/target/target_core_user.c  120
1 file changed, 70 insertions, 50 deletions
diff --git a/drivers/target/target_core_user.c b/drivers/target/target_core_user.c
index d9fd91ee8282..cab6c72eb012 100644
--- a/drivers/target/target_core_user.c
+++ b/drivers/target/target_core_user.c
@@ -1973,71 +1973,91 @@ static struct target_backend_ops tcmu_ops = {
.tb_dev_attrib_attrs = NULL,
};
-static int unmap_thread_fn(void *data)
+
+static void find_free_blocks(void)
{
struct tcmu_dev *udev;
loff_t off;
uint32_t start, end, block;
- while (!kthread_should_stop()) {
- DEFINE_WAIT(__wait);
-
- prepare_to_wait(&unmap_wait, &__wait, TASK_INTERRUPTIBLE);
- schedule();
- finish_wait(&unmap_wait, &__wait);
+ mutex_lock(&root_udev_mutex);
+ list_for_each_entry(udev, &root_udev, node) {
+ mutex_lock(&udev->cmdr_lock);
- if (kthread_should_stop())
- break;
+ /* Try to complete the finished commands first */
+ tcmu_handle_completions(udev);
- mutex_lock(&root_udev_mutex);
- list_for_each_entry(udev, &root_udev, node) {
- mutex_lock(&udev->cmdr_lock);
+ /* Skip the udevs waiting the global pool or in idle */
+ if (udev->waiting_global || !udev->dbi_thresh) {
+ mutex_unlock(&udev->cmdr_lock);
+ continue;
+ }
- /* Try to complete the finished commands first */
- tcmu_handle_completions(udev);
+ end = udev->dbi_max + 1;
+ block = find_last_bit(udev->data_bitmap, end);
+ if (block == udev->dbi_max) {
+ /*
+ * The last bit is dbi_max, so there is
+ * no need to shrink any blocks.
+ */
+ mutex_unlock(&udev->cmdr_lock);
+ continue;
+ } else if (block == end) {
+ /* The current udev will goto idle state */
+ udev->dbi_thresh = start = 0;
+ udev->dbi_max = 0;
+ } else {
+ udev->dbi_thresh = start = block + 1;
+ udev->dbi_max = block;
+ }
- /* Skip the udevs waiting the global pool or in idle */
- if (udev->waiting_global || !udev->dbi_thresh) {
- mutex_unlock(&udev->cmdr_lock);
- continue;
- }
+ /* Here will truncate the data area from off */
+ off = udev->data_off + start * DATA_BLOCK_SIZE;
+ unmap_mapping_range(udev->inode->i_mapping, off, 0, 1);
- end = udev->dbi_max + 1;
- block = find_last_bit(udev->data_bitmap, end);
- if (block == udev->dbi_max) {
- /*
- * The last bit is dbi_max, so there is
- * no need to shrink any blocks.
- */
- mutex_unlock(&udev->cmdr_lock);
- continue;
- } else if (block == end) {
- /* The current udev will goto idle state */
- udev->dbi_thresh = start = 0;
- udev->dbi_max = 0;
- } else {
- udev->dbi_thresh = start = block + 1;
- udev->dbi_max = block;
- }
+ /* Release the block pages */
+ tcmu_blocks_release(&udev->data_blocks, start, end);
+ mutex_unlock(&udev->cmdr_lock);
+ }
+ mutex_unlock(&root_udev_mutex);
+}
- /* Here will truncate the data area from off */
- off = udev->data_off + start * DATA_BLOCK_SIZE;
- unmap_mapping_range(udev->inode->i_mapping, off, 0, 1);
+static void run_cmdr_queues(void)
+{
+ struct tcmu_dev *udev;
- /* Release the block pages */
- tcmu_blocks_release(&udev->data_blocks, start, end);
+ /*
+ * Try to wake up the udevs who are waiting
+ * for the global data block pool.
+ */
+ mutex_lock(&root_udev_mutex);
+ list_for_each_entry(udev, &root_udev, node) {
+ mutex_lock(&udev->cmdr_lock);
+ if (!udev->waiting_global) {
mutex_unlock(&udev->cmdr_lock);
+ break;
}
+ mutex_unlock(&udev->cmdr_lock);
- /*
- * Try to wake up the udevs who are waiting
- * for the global data pool.
- */
- list_for_each_entry(udev, &root_udev, node) {
- if (udev->waiting_global)
- wake_up(&udev->wait_cmdr);
- }
- mutex_unlock(&root_udev_mutex);
+ wake_up(&udev->wait_cmdr);
+ }
+ mutex_unlock(&root_udev_mutex);
+}
+
+static int unmap_thread_fn(void *data)
+{
+ while (!kthread_should_stop()) {
+ DEFINE_WAIT(__wait);
+
+ prepare_to_wait(&unmap_wait, &__wait, TASK_INTERRUPTIBLE);
+ schedule();
+ finish_wait(&unmap_wait, &__wait);
+
+ if (kthread_should_stop())
+ break;
+
+ find_free_blocks();
+ run_cmdr_queues();
}
return 0;