author    Eran Ben Elisha <eranbe@nvidia.com>    2020-08-31 15:04:35 +0300
committer Greg Kroah-Hartman <gregkh@linuxfoundation.org>    2020-11-22 10:14:11 +0100
commit    b57c75956e79e6fb526c3e71ea63f2ffb6eb8435 (patch)
tree      1d9b2096912810c7735d012cdfd894b53060ba5c
parent    7db82a5a4c15b89fa800ce2cf5d04cc0ae30279c (diff)
net/mlx5: Add retry mechanism to the command entry index allocation
commit 410bd754cd73c4a2ac3856d9a03d7b08f9c906bf upstream.

It is possible that allocation of a new command entry index will temporarily fail. The new command already holds the semaphore, so a free entry should become available soon. Add a one-second retry mechanism before returning an error.

The patch "net/mlx5: Avoid possible free of command entry while timeout comp handler" increases the likelihood of hitting this temporary failure, as it delays the entry index release for non-callback commands.

Fixes: e126ba97dba9 ("mlx5: Add driver for Mellanox Connect-IB adapters")
Signed-off-by: Eran Ben Elisha <eranbe@nvidia.com>
Reviewed-by: Moshe Shemesh <moshe@nvidia.com>
Signed-off-by: Saeed Mahameed <saeedm@nvidia.com>
Cc: Timo Rothenpieler <timo@rothenpieler.org>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
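A minimal sketch of the bounded busy-wait pattern the message describes, assuming ordinary kernel context; jiffies, msecs_to_jiffies(), time_before() and cpu_relax() are the standard kernel primitives, while alloc_slot_bounded() and try_alloc_slot() are hypothetical stand-ins rather than the helper actually added by the patch (that helper, cmd_alloc_index_retry(), appears in the diff below):

    /*
     * Sketch only: mirrors the bounded-retry pattern described above.
     * try_alloc_slot() is a hypothetical stand-in for cmd_alloc_index()
     * and is assumed to return a slot index or a negative errno.
     */
    #include <linux/jiffies.h>      /* jiffies, msecs_to_jiffies(), time_before() */
    #include <asm/processor.h>      /* cpu_relax() */

    static int try_alloc_slot(void *cmd);   /* hypothetical allocator */

    static int alloc_slot_bounded(void *cmd)
    {
            /* Give up after roughly one second of wall-clock time. */
            unsigned long deadline = jiffies + msecs_to_jiffies(1000);
            int idx;

            do {
                    idx = try_alloc_slot(cmd);
                    if (idx >= 0)
                            return idx;
                    /*
                     * The caller already holds the command semaphore, so
                     * another command's completion is expected to free an
                     * index shortly; spin briefly instead of failing now.
                     */
                    cpu_relax();
            } while (time_before(jiffies, deadline));

            return idx;     /* still negative: caller reports the failure */
    }

The deadline keeps the worst-case stall near one second; if allocation still fails after that, the existing error path in cmd_work_handler() (mlx5_core_err() plus the callback/completion handling) runs unchanged.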
-rw-r--r--  drivers/net/ethernet/mellanox/mlx5/core/cmd.c  21
1 file changed, 20 insertions(+), 1 deletion(-)
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
index b00e72b86863..76547d35cd0e 100644
--- a/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
+++ b/drivers/net/ethernet/mellanox/mlx5/core/cmd.c
@@ -883,6 +883,25 @@ static bool opcode_allowed(struct mlx5_cmd *cmd, u16 opcode)
 	return cmd->allowed_opcode == opcode;
 }
 
+static int cmd_alloc_index_retry(struct mlx5_cmd *cmd)
+{
+	unsigned long alloc_end = jiffies + msecs_to_jiffies(1000);
+	int idx;
+
+retry:
+	idx = cmd_alloc_index(cmd);
+	if (idx < 0 && time_before(jiffies, alloc_end)) {
+		/* Index allocation can fail on heavy load of commands. This is a temporary
+		 * situation as the current command already holds the semaphore, meaning that
+		 * another command completion is being handled and it is expected to release
+		 * the entry index soon.
+		 */
+		cpu_relax();
+		goto retry;
+	}
+	return idx;
+}
+
 static void cmd_work_handler(struct work_struct *work)
 {
 	struct mlx5_cmd_work_ent *ent = container_of(work, struct mlx5_cmd_work_ent, work);
@@ -900,7 +919,7 @@ static void cmd_work_handler(struct work_struct *work)
 	sem = ent->page_queue ? &cmd->pages_sem : &cmd->sem;
 	down(sem);
 	if (!ent->page_queue) {
-		alloc_ret = cmd_alloc_index(cmd);
+		alloc_ret = cmd_alloc_index_retry(cmd);
 		if (alloc_ret < 0) {
 			mlx5_core_err(dev, "failed to allocate command entry\n");
 			if (ent->callback) {