path: root/net/mptcp
author	Geliang Tang <geliang.tang@suse.com>	2022-11-10 15:23:19 -0800
committer	Jakub Kicinski <kuba@kernel.org>	2022-11-11 21:19:46 -0800
commit	73a0052a61f98354f39f461e03f1a7e513b84578 (patch)
tree	3e913422a5027219fb3ee3c348a391c23e141007 /net/mptcp
parent	00df24f19179c8e8c81fffa95c91d5c2eab8964e (diff)
mptcp: change 'first' as a parameter
The function mptcp_subflow_process_delegated() uses the input ssk first, while __mptcp_check_push() invokes the packet scheduler first. So this patch adds a new parameter named 'first' for the function __mptcp_subflow_push_pending() to deal with these two cases separately.

With this change, the code that invokes the packet scheduler in the function __mptcp_check_push() can be removed, and replaced by invoking __mptcp_subflow_push_pending() directly.

Reviewed-by: Mat Martineau <mathew.j.martineau@linux.intel.com>
Signed-off-by: Geliang Tang <geliang.tang@suse.com>
Signed-off-by: Mat Martineau <mathew.j.martineau@linux.intel.com>
Signed-off-by: Jakub Kicinski <kuba@kernel.org>
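For illustration only, the following is a small self-contained sketch of the pattern the patch introduces; it is not the kernel code. push_pending(), scheduler_pick() and struct subflow are invented stand-ins for __mptcp_subflow_push_pending(), mptcp_subflow_get_send() and the real subflow sockets:

#include <stdbool.h>
#include <stdio.h>

struct subflow { const char *name; };

static struct subflow sf_a = { "subflow A" };
static struct subflow sf_b = { "subflow B" };

/* Toy stand-in for the packet scheduler (mptcp_subflow_get_send()). */
static struct subflow *scheduler_pick(void)
{
	return &sf_b;
}

/* Toy stand-in for __mptcp_subflow_push_pending(): the caller says via
 * 'first' whether its subflow may be trusted for the first chunk.
 */
static void push_pending(struct subflow *ssk, int chunks, bool first)
{
	for (int i = 0; i < chunks; i++) {
		/* Only the first chunk may reuse the caller's subflow;
		 * every later chunk re-runs the scheduler.
		 */
		struct subflow *xmit = first ? ssk : scheduler_pick();

		printf("chunk %d sent on %s\n", i, xmit->name);
		first = false;
	}
}

int main(void)
{
	/* Delegated-action path: ssk was already chosen, use it first. */
	push_pending(&sf_a, 2, true);

	/* __mptcp_check_push() path: consult the scheduler immediately. */
	push_pending(&sf_a, 2, false);
	return 0;
}

In this sketch the delegated path passes first=true because its subflow was already selected, while the check-push path passes first=false so the scheduler runs before the very first chunk, mirroring how the two kernel callers now share one push loop.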
Diffstat (limited to 'net/mptcp')
-rw-r--r--	net/mptcp/protocol.c	20
1 file changed, 6 insertions(+), 14 deletions(-)
diff --git a/net/mptcp/protocol.c b/net/mptcp/protocol.c
index 64d7070de901..5a344788f843 100644
--- a/net/mptcp/protocol.c
+++ b/net/mptcp/protocol.c
@@ -1602,7 +1602,7 @@ out:
__mptcp_check_send_data_fin(sk);
}
-static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk)
+static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk, bool first)
{
struct mptcp_sock *msk = mptcp_sk(sk);
struct mptcp_sendmsg_info info = {
@@ -1611,7 +1611,6 @@ static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk)
struct mptcp_data_frag *dfrag;
struct sock *xmit_ssk;
int len, copied = 0;
- bool first = true;
info.flags = 0;
while ((dfrag = mptcp_send_head(sk))) {
@@ -1621,8 +1620,7 @@ static void __mptcp_subflow_push_pending(struct sock *sk, struct sock *ssk)
while (len > 0) {
int ret = 0;
- /* the caller already invoked the packet scheduler,
- * check for a different subflow usage only after
+ /* check for a different subflow usage only after
* spooling the first chunk of data
*/
xmit_ssk = first ? ssk : mptcp_subflow_get_send(msk);
@@ -3220,16 +3218,10 @@ void __mptcp_check_push(struct sock *sk, struct sock *ssk)
if (!mptcp_send_head(sk))
return;
- if (!sock_owned_by_user(sk)) {
- struct sock *xmit_ssk = mptcp_subflow_get_send(mptcp_sk(sk));
-
- if (xmit_ssk == ssk)
- __mptcp_subflow_push_pending(sk, ssk);
- else if (xmit_ssk)
- mptcp_subflow_delegate(mptcp_subflow_ctx(xmit_ssk), MPTCP_DELEGATE_SEND);
- } else {
+ if (!sock_owned_by_user(sk))
+ __mptcp_subflow_push_pending(sk, ssk, false);
+ else
__set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->cb_flags);
- }
}
#define MPTCP_FLAGS_PROCESS_CTX_NEED (BIT(MPTCP_PUSH_PENDING) | \
@@ -3320,7 +3312,7 @@ void mptcp_subflow_process_delegated(struct sock *ssk)
if (test_bit(MPTCP_DELEGATE_SEND, &subflow->delegated_status)) {
mptcp_data_lock(sk);
if (!sock_owned_by_user(sk))
- __mptcp_subflow_push_pending(sk, ssk);
+ __mptcp_subflow_push_pending(sk, ssk, true);
else
__set_bit(MPTCP_PUSH_PENDING, &mptcp_sk(sk)->cb_flags);
mptcp_data_unlock(sk);