author     Karsten Graul <kgraul@linux.ibm.com>    2020-07-08 17:05:11 +0200
committer  David S. Miller <davem@davemloft.net>   2020-07-08 12:35:15 -0700
commit     6778a6bed09b58beca936a675e9dd195c0986580 (patch)
tree       b87ff518c2412230064a64641109ba9a0267c628 /net/smc/smc_core.c
parent     a42e6aee7f47a8a68d09923c720fc8f605a04207 (diff)
net/smc: separate LLC wait queues for flow and messages
There might be races in scenarios where both SMC link groups are on the same system. Prevent that by creating separate wait queues for LLC flows and messages. Switch to the non-interruptible versions of wait_event() and wake_up() for the LLC flow waiter to make sure the waiters get control sequentially. Fine-tune the llc_flow_lock to include the assignment of the message. Write to the system log when an unexpected message is dropped. Remove an extra indirection and use the existing local variable lgr in smc_llc_enqueue().

Fixes: 555da9af827d ("net/smc: add event-based llc_flow framework")
Reviewed-by: Ursula Braun <ubraun@linux.ibm.com>
Signed-off-by: Karsten Graul <kgraul@linux.ibm.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
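As an illustration only (not part of the patch), here is a minimal sketch of the split wait-queue pattern this commit introduces. All names (demo_lgr, demo_wait_for_flow, demo_flow_done, DEMO_WAIT_TIME) are hypothetical; the sketch simply shows two independent wait_queue_head_t members so that message waiters and flow waiters can be woken separately, using the non-interruptible wait_event_timeout()/wake_up() helpers mentioned above:

/*
 * Simplified sketch, not the actual SMC code: one structure carrying two
 * independent wait queues, one for LLC messages and one for the LLC flow,
 * so waking message waiters never depends on flow waiters and vice versa.
 */
#include <linux/wait.h>
#include <linux/jiffies.h>

#define DEMO_WAIT_TIME	(2 * HZ)	/* arbitrary timeout for this sketch */

struct demo_lgr {
	wait_queue_head_t	llc_msg_waiter;		/* wait for an LLC message */
	wait_queue_head_t	llc_flow_waiter;	/* wait for the LLC flow to end */
	int			flow_type;		/* 0 == no flow active */
};

static void demo_lgr_init(struct demo_lgr *lgr)
{
	init_waitqueue_head(&lgr->llc_msg_waiter);
	init_waitqueue_head(&lgr->llc_flow_waiter);
	lgr->flow_type = 0;
}

/* Wait non-interruptibly until the current flow has finished (or timeout). */
static void demo_wait_for_flow(struct demo_lgr *lgr)
{
	wait_event_timeout(lgr->llc_flow_waiter, lgr->flow_type == 0,
			   DEMO_WAIT_TIME);
}

/* When a flow ends, only flow waiters are woken; message waiters are untouched. */
static void demo_flow_done(struct demo_lgr *lgr)
{
	lgr->flow_type = 0;
	wake_up(&lgr->llc_flow_waiter);
}

On teardown, both queues would be flushed with wake_up_all(), mirroring what the patch does in smcr_lgr_link_deactivate_all().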
Diffstat (limited to 'net/smc/smc_core.c')
-rw-r--r--  net/smc/smc_core.c  38
1 file changed, 22 insertions(+), 16 deletions(-)
diff --git a/net/smc/smc_core.c b/net/smc/smc_core.c
index 7964a21e5e6f..d695ce71837e 100644
--- a/net/smc/smc_core.c
+++ b/net/smc/smc_core.c
@@ -247,7 +247,8 @@ static void smcr_lgr_link_deactivate_all(struct smc_link_group *lgr)
if (smc_link_usable(lnk))
lnk->state = SMC_LNK_INACTIVE;
}
- wake_up_interruptible_all(&lgr->llc_waiter);
+ wake_up_all(&lgr->llc_msg_waiter);
+ wake_up_all(&lgr->llc_flow_waiter);
}
static void smc_lgr_free(struct smc_link_group *lgr);
@@ -1130,18 +1131,19 @@ static void smcr_link_up(struct smc_link_group *lgr,
return;
if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_NONE) {
/* some other llc task is ongoing */
- wait_event_interruptible_timeout(lgr->llc_waiter,
- (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE),
+ wait_event_timeout(lgr->llc_flow_waiter,
+ (list_empty(&lgr->list) ||
+ lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE),
SMC_LLC_WAIT_TIME);
}
- if (list_empty(&lgr->list) ||
- !smc_ib_port_active(smcibdev, ibport))
- return; /* lgr or device no longer active */
- link = smc_llc_usable_link(lgr);
- if (!link)
- return;
- smc_llc_send_add_link(link, smcibdev->mac[ibport - 1], gid,
- NULL, SMC_LLC_REQ);
+ /* lgr or device no longer active? */
+ if (!list_empty(&lgr->list) &&
+ smc_ib_port_active(smcibdev, ibport))
+ link = smc_llc_usable_link(lgr);
+ if (link)
+ smc_llc_send_add_link(link, smcibdev->mac[ibport - 1],
+ gid, NULL, SMC_LLC_REQ);
+ wake_up(&lgr->llc_flow_waiter); /* wake up next waiter */
}
}
@@ -1195,13 +1197,17 @@ static void smcr_link_down(struct smc_link *lnk)
if (lgr->llc_flow_lcl.type != SMC_LLC_FLOW_NONE) {
/* another llc task is ongoing */
mutex_unlock(&lgr->llc_conf_mutex);
- wait_event_interruptible_timeout(lgr->llc_waiter,
- (lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE),
+ wait_event_timeout(lgr->llc_flow_waiter,
+ (list_empty(&lgr->list) ||
+ lgr->llc_flow_lcl.type == SMC_LLC_FLOW_NONE),
SMC_LLC_WAIT_TIME);
mutex_lock(&lgr->llc_conf_mutex);
}
- smc_llc_send_delete_link(to_lnk, del_link_id, SMC_LLC_REQ, true,
- SMC_LLC_DEL_LOST_PATH);
+ if (!list_empty(&lgr->list))
+ smc_llc_send_delete_link(to_lnk, del_link_id,
+ SMC_LLC_REQ, true,
+ SMC_LLC_DEL_LOST_PATH);
+ wake_up(&lgr->llc_flow_waiter); /* wake up next waiter */
}
}
@@ -1262,7 +1268,7 @@ static void smc_link_down_work(struct work_struct *work)
if (list_empty(&lgr->list))
return;
- wake_up_interruptible_all(&lgr->llc_waiter);
+ wake_up_all(&lgr->llc_msg_waiter);
mutex_lock(&lgr->llc_conf_mutex);
smcr_link_down(link);
mutex_unlock(&lgr->llc_conf_mutex);