author | Mike Christie <michael.christie@oracle.com> | 2021-09-29 21:04:21 -0500 |
---|---|---|
committer | Martin K. Petersen <martin.petersen@oracle.com> | 2021-10-18 22:38:36 -0400 |
commit | 7324f47d4293ff50f489010735a4057defb1a5d6 (patch) | |
tree | 67eadcd82db8a482dfba6802d7a05929288a0091 /drivers/target | |
parent | 1283c0d1a32bb924324481586b5d6e8e76f676ba (diff) | |
download | linux-7324f47d4293ff50f489010735a4057defb1a5d6.tar.gz linux-7324f47d4293ff50f489010735a4057defb1a5d6.tar.bz2 linux-7324f47d4293ff50f489010735a4057defb1a5d6.zip |
scsi: target: Replace lun_tg_pt_gp_lock with rcu in I/O path
We only hold lun_tg_pt_gp_lock in target_alua_state_check() to make sure
the tg_pt_gp is not freed from under us while we copy its state, delay,
and ID values. We can instead use RCU here to access the tg_pt_gp.
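For readers who want the shape of the change up front, here is a minimal, self-contained sketch of the reader-side pattern this patch adopts. The struct and function names (grp, dev, reader_copy_state) and their fields are placeholders for illustration only; they are not the se_lun / t10_alua_tg_pt_gp code itself.

#include <linux/rcupdate.h>
#include <linux/spinlock.h>
#include <linux/types.h>

struct grp {
        int state;
        int nonop_delay_msecs;
        u16 id;
};

struct dev {
        struct grp __rcu *grp;          /* published with rcu_assign_pointer() */
        spinlock_t grp_lock;            /* writers still serialize on this lock */
};

/*
 * Reader-side sketch: instead of taking grp_lock around the pointer copy,
 * enter an RCU read-side critical section, dereference the pointer, copy
 * the few fields needed, and drop the RCU read lock.
 */
static int reader_copy_state(struct dev *d, int *state, int *delay, u16 *id)
{
        struct grp *g;

        rcu_read_lock();
        g = rcu_dereference(d->grp);
        if (!g) {
                rcu_read_unlock();
                return 0;
        }
        *state = g->state;
        *delay = g->nonop_delay_msecs;
        *id = g->id;
        rcu_read_unlock();              /* g must not be dereferenced after this */
        return 1;
}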
With this patch, IOPS can increase by up to 10% for jobs like:
fio --filename=/dev/sdX --direct=1 --rw=randrw --bs=4k \
--ioengine=libaio --iodepth=64 --numjobs=N
when there are multiple sessions (running that fio command against each
/dev/sdX, or using multipath with more than 8 paths), or when loop or vhost
is used with multiple threads, more than 8 queues, and numjobs > 8.
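For instance, a hypothetical run against a single multipath device with 16 jobs (the device name and job count are illustrative, not taken from the patch) would look like:
fio --filename=/dev/mapper/mpatha --direct=1 --rw=randrw --bs=4k \
  --ioengine=libaio --iodepth=64 --numjobs=16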
Link: https://lore.kernel.org/r/20210930020422.92578-5-michael.christie@oracle.com
Signed-off-by: Mike Christie <michael.christie@oracle.com>
Signed-off-by: Martin K. Petersen <martin.petersen@oracle.com>
Diffstat (limited to 'drivers/target')
-rw-r--r-- | drivers/target/target_core_alua.c | 61 |
1 file changed, 34 insertions, 27 deletions
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index bd0f2ce011dd..74944b914b4e 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -247,11 +247,11 @@ target_emulate_report_target_port_groups(struct se_cmd *cmd)
                  * this CDB was received upon to determine this value individually
                  * for ALUA target port group.
                  */
-                spin_lock(&cmd->se_lun->lun_tg_pt_gp_lock);
-                tg_pt_gp = cmd->se_lun->lun_tg_pt_gp;
+                rcu_read_lock();
+                tg_pt_gp = rcu_dereference(cmd->se_lun->lun_tg_pt_gp);
                 if (tg_pt_gp)
                         buf[5] = tg_pt_gp->tg_pt_gp_implicit_trans_secs;
-                spin_unlock(&cmd->se_lun->lun_tg_pt_gp_lock);
+                rcu_read_unlock();
         }
         transport_kunmap_data_sg(cmd);
@@ -292,24 +292,24 @@ target_emulate_set_target_port_groups(struct se_cmd *cmd)
          * Determine if explicit ALUA via SET_TARGET_PORT_GROUPS is allowed
          * for the local tg_pt_gp.
          */
-        spin_lock(&l_lun->lun_tg_pt_gp_lock);
-        l_tg_pt_gp = l_lun->lun_tg_pt_gp;
+        rcu_read_lock();
+        l_tg_pt_gp = rcu_dereference(l_lun->lun_tg_pt_gp);
         if (!l_tg_pt_gp) {
-                spin_unlock(&l_lun->lun_tg_pt_gp_lock);
+                rcu_read_unlock();
                 pr_err("Unable to access l_lun->tg_pt_gp\n");
                 rc = TCM_UNSUPPORTED_SCSI_OPCODE;
                 goto out;
         }

         if (!(l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICIT_ALUA)) {
-                spin_unlock(&l_lun->lun_tg_pt_gp_lock);
+                rcu_read_unlock();
                 pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
                                 " while TPGS_EXPLICIT_ALUA is disabled\n");
                 rc = TCM_UNSUPPORTED_SCSI_OPCODE;
                 goto out;
         }
         valid_states = l_tg_pt_gp->tg_pt_gp_alua_supported_states;
-        spin_unlock(&l_lun->lun_tg_pt_gp_lock);
+        rcu_read_unlock();

         ptr = &buf[4]; /* Skip over RESERVED area in header */
@@ -662,17 +662,17 @@ target_alua_state_check(struct se_cmd *cmd)
                         " target port\n");
                 return TCM_ALUA_OFFLINE;
         }
-
-        if (!lun->lun_tg_pt_gp)
+        rcu_read_lock();
+        tg_pt_gp = rcu_dereference(lun->lun_tg_pt_gp);
+        if (!tg_pt_gp) {
+                rcu_read_unlock();
                 return 0;
+        }

-        spin_lock(&lun->lun_tg_pt_gp_lock);
-        tg_pt_gp = lun->lun_tg_pt_gp;
         out_alua_state = tg_pt_gp->tg_pt_gp_alua_access_state;
         nonop_delay_msecs = tg_pt_gp->tg_pt_gp_nonop_delay_msecs;
         tg_pt_gp_id = tg_pt_gp->tg_pt_gp_id;
-
-        spin_unlock(&lun->lun_tg_pt_gp_lock);
+        rcu_read_unlock();
         /*
          * Process ALUA_ACCESS_STATE_ACTIVE_OPTIMIZED in a separate conditional
          * statement so the compiler knows explicitly to check this case first.
@@ -1219,10 +1219,10 @@ static int core_alua_set_tg_pt_secondary_state(
         struct t10_alua_tg_pt_gp *tg_pt_gp;
         int trans_delay_msecs;

-        spin_lock(&lun->lun_tg_pt_gp_lock);
-        tg_pt_gp = lun->lun_tg_pt_gp;
+        rcu_read_lock();
+        tg_pt_gp = rcu_dereference(lun->lun_tg_pt_gp);
         if (!tg_pt_gp) {
-                spin_unlock(&lun->lun_tg_pt_gp_lock);
+                rcu_read_unlock();
                 pr_err("Unable to complete secondary state"
                                 " transition\n");
                 return -EINVAL;
@@ -1246,7 +1246,7 @@ static int core_alua_set_tg_pt_secondary_state(
                 "implicit", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
                 tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE");

-        spin_unlock(&lun->lun_tg_pt_gp_lock);
+        rcu_read_unlock();
         /*
          * Do the optional transition delay after we set the secondary
          * ALUA access state.
@@ -1754,13 +1754,14 @@ void core_alua_free_tg_pt_gp(
                         __target_attach_tg_pt_gp(lun,
                                         dev->t10_alua.default_tg_pt_gp);
                 } else
-                        lun->lun_tg_pt_gp = NULL;
+                        rcu_assign_pointer(lun->lun_tg_pt_gp, NULL);
                 spin_unlock(&lun->lun_tg_pt_gp_lock);

                 spin_lock(&tg_pt_gp->tg_pt_gp_lock);
         }
         spin_unlock(&tg_pt_gp->tg_pt_gp_lock);

+        synchronize_rcu();
         kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
 }
@@ -1805,7 +1806,7 @@ static void __target_attach_tg_pt_gp(struct se_lun *lun,
         assert_spin_locked(&lun->lun_tg_pt_gp_lock);

         spin_lock(&tg_pt_gp->tg_pt_gp_lock);
-        lun->lun_tg_pt_gp = tg_pt_gp;
+        rcu_assign_pointer(lun->lun_tg_pt_gp, tg_pt_gp);
         list_add_tail(&lun->lun_tg_pt_gp_link, &tg_pt_gp->tg_pt_gp_lun_list);
         tg_pt_gp->tg_pt_gp_members++;
         spin_lock(&lun->lun_deve_lock);
@@ -1822,6 +1823,7 @@ void target_attach_tg_pt_gp(struct se_lun *lun,
         spin_lock(&lun->lun_tg_pt_gp_lock);
         __target_attach_tg_pt_gp(lun, tg_pt_gp);
         spin_unlock(&lun->lun_tg_pt_gp_lock);
+        synchronize_rcu();
 }

 static void __target_detach_tg_pt_gp(struct se_lun *lun,
@@ -1834,7 +1836,7 @@ static void __target_detach_tg_pt_gp(struct se_lun *lun,
         tg_pt_gp->tg_pt_gp_members--;
         spin_unlock(&tg_pt_gp->tg_pt_gp_lock);

-        lun->lun_tg_pt_gp = NULL;
+        rcu_assign_pointer(lun->lun_tg_pt_gp, NULL);
 }

 void target_detach_tg_pt_gp(struct se_lun *lun)
@@ -1842,10 +1844,12 @@ void target_detach_tg_pt_gp(struct se_lun *lun)
         struct t10_alua_tg_pt_gp *tg_pt_gp;

         spin_lock(&lun->lun_tg_pt_gp_lock);
-        tg_pt_gp = lun->lun_tg_pt_gp;
+        tg_pt_gp = rcu_dereference_check(lun->lun_tg_pt_gp,
+                        lockdep_is_held(&lun->lun_tg_pt_gp_lock));
         if (tg_pt_gp)
                 __target_detach_tg_pt_gp(lun, tg_pt_gp);
         spin_unlock(&lun->lun_tg_pt_gp_lock);
+        synchronize_rcu();
 }

 ssize_t core_alua_show_tg_pt_gp_info(struct se_lun *lun, char *page)
@@ -1854,8 +1858,8 @@ ssize_t core_alua_show_tg_pt_gp_info(struct se_lun *lun, char *page)
         struct t10_alua_tg_pt_gp *tg_pt_gp;
         ssize_t len = 0;

-        spin_lock(&lun->lun_tg_pt_gp_lock);
-        tg_pt_gp = lun->lun_tg_pt_gp;
+        rcu_read_lock();
+        tg_pt_gp = rcu_dereference(lun->lun_tg_pt_gp);
         if (tg_pt_gp) {
                 tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
                 len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:"
@@ -1871,7 +1875,7 @@ ssize_t core_alua_show_tg_pt_gp_info(struct se_lun *lun, char *page)
                         "Offline" : "None",
                         core_alua_dump_status(lun->lun_tg_pt_secondary_stat));
         }
-        spin_unlock(&lun->lun_tg_pt_gp_lock);
+        rcu_read_unlock();

         return len;
 }
@@ -1918,7 +1922,8 @@ ssize_t core_alua_store_tg_pt_gp_info(
         }

         spin_lock(&lun->lun_tg_pt_gp_lock);
-        tg_pt_gp = lun->lun_tg_pt_gp;
+        tg_pt_gp = rcu_dereference_check(lun->lun_tg_pt_gp,
+                        lockdep_is_held(&lun->lun_tg_pt_gp_lock));
         if (tg_pt_gp) {
                 /*
                  * Clearing an existing tg_pt_gp association, and replacing
@@ -1941,7 +1946,7 @@ ssize_t core_alua_store_tg_pt_gp_info(
                                         dev->t10_alua.default_tg_pt_gp);
                         spin_unlock(&lun->lun_tg_pt_gp_lock);

-                        return count;
+                        goto sync_rcu;
                 }
                 __target_detach_tg_pt_gp(lun, tg_pt_gp);
                 move = 1;
@@ -1958,6 +1963,8 @@ ssize_t core_alua_store_tg_pt_gp_info(
                  tg_pt_gp_new->tg_pt_gp_id);

         core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
+sync_rcu:
+        synchronize_rcu();
         return count;
 }
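As a closing note on the update side: the synchronize_rcu() calls added above are what make the lockless readers safe. Here is a minimal sketch of that pattern, reusing the hypothetical struct dev / struct grp from the earlier sketch (again placeholders, not the kernel's types).

#include <linux/slab.h>         /* kfree() */

/*
 * Writer-side sketch: unpublish the pointer under the existing spinlock,
 * wait for every reader that may still hold a reference obtained via
 * rcu_dereference() to leave its read-side critical section, and only
 * then free the object.
 */
static void writer_detach_and_free(struct dev *d)
{
        struct grp *g;

        spin_lock(&d->grp_lock);
        g = rcu_dereference_check(d->grp,
                        lockdep_is_held(&d->grp_lock));
        rcu_assign_pointer(d->grp, NULL);       /* unpublish */
        spin_unlock(&d->grp_lock);

        synchronize_rcu();      /* wait out all pre-existing RCU readers */
        kfree(g);               /* kfree(NULL) is a no-op if nothing was attached */
}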