author     Linus Torvalds <torvalds@linux-foundation.org>   2023-05-05 19:12:01 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2023-05-05 19:12:01 -0700
commit     ed23734c23d2fc1e6a1ff80f8c2b82faeed0ed0c (patch)
tree       4837380dc9b83ea532479ea74fd6bdd5c08ac10c /net
parent     a5e219005aeaf52cb10f9999a61c07a140db7097 (diff)
parent     644bca1d48139ad77570c24d22bafaf8e438cf03 (diff)
download   linux-ed23734c23d2fc1e6a1ff80f8c2b82faeed0ed0c.tar.gz
           linux-ed23734c23d2fc1e6a1ff80f8c2b82faeed0ed0c.tar.bz2
           linux-ed23734c23d2fc1e6a1ff80f8c2b82faeed0ed0c.zip
Merge tag 'net-6.4-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net
Pull networking fixes from Jakub Kicinski:
 "Including fixes from netfilter.

  Current release - regressions:

   - sched: act_pedit: free pedit keys on bail from offset check

  Current release - new code bugs:

   - pds_core:
       - Kconfig fixes (DEBUGFS and AUXILIARY_BUS)
       - fix mutex double unlock in error path

  Previous releases - regressions:

   - sched: cls_api: remove block_cb from driver_list before freeing

   - nf_tables: fix ct untracked match breakage

   - eth: mtk_eth_soc: drop generic vlan rx offload

   - sched: flower: fix error handler on replace

  Previous releases - always broken:

   - tcp: fix skb_copy_ubufs() vs BIG TCP

   - ipv6: fix skb hash for some RST packets

   - af_packet: don't send zero-byte data in packet_sendmsg_spkt()

   - rxrpc: timeout handling fixes after moving client call connection
     to the I/O thread

   - ixgbe: fix panic during XDP_TX with > 64 CPUs

   - igc: RMW the SRRCTL register to prevent losing timestamp config

   - dsa: mt7530: fix corrupt frames using TRGMII on 40 MHz XTAL MT7621

   - r8152:
       - fix flow control issue of RTL8156A
       - fix the poor throughput for 2.5G devices
       - move setting r8153b_rx_agg_chg_indicate() to fix coalescing
       - enable autosuspend

   - ncsi: clear Tx enable mode when handling a Config required AEN

   - octeontx2-pf: macsec: fixes for CN10KB ASIC rev

  Misc:

   - 9p: remove INET dependency"

* tag 'net-6.4-rc1' of git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net: (69 commits)
  net: bcmgenet: Remove phy_stop() from bcmgenet_netif_stop()
  pds_core: fix mutex double unlock in error path
  net/sched: flower: fix error handler on replace
  Revert "net/sched: flower: Fix wrong handle assignment during filter change"
  net/sched: flower: fix filter idr initialization
  net: fec: correct the counting of XDP sent frames
  bonding: add xdp_features support
  net: enetc: check the index of the SFI rather than the handle
  sfc: Add back mailing list
  virtio_net: suppress cpu stall when free_unused_bufs
  ice: block LAN in case of VF to VF offload
  net: dsa: mt7530: fix network connectivity with multiple CPU ports
  net: dsa: mt7530: fix corrupt frames using trgmii on 40 MHz XTAL MT7621
  9p: Remove INET dependency
  netfilter: nf_tables: fix ct untracked match breakage
  af_packet: Don't send zero-byte data in packet_sendmsg_spkt().
  igc: read before write to SRRCTL register
  pds_core: add AUXILIARY_BUS and NET_DEVLINK to Kconfig
  pds_core: remove CONFIG_DEBUG_FS from makefile
  ionic: catch failure from devlink_alloc
  ...
Diffstat (limited to 'net')
-rw-r--r--  net/9p/Kconfig                  |  2
-rw-r--r--  net/core/skbuff.c               | 20
-rw-r--r--  net/ethtool/ioctl.c             |  2
-rw-r--r--  net/ipv6/sit.c                  |  8
-rw-r--r--  net/ipv6/tcp_ipv6.c             |  2
-rw-r--r--  net/ncsi/ncsi-aen.c             |  1
-rw-r--r--  net/netfilter/nf_tables_api.c   | 41
-rw-r--r--  net/netfilter/nft_ct_fast.c     | 14
-rw-r--r--  net/netfilter/nft_dynset.c      |  2
-rw-r--r--  net/netfilter/nft_lookup.c      |  2
-rw-r--r--  net/netfilter/nft_objref.c      |  2
-rw-r--r--  net/packet/af_packet.c          |  2
-rw-r--r--  net/rxrpc/af_rxrpc.c            |  3
-rw-r--r--  net/rxrpc/ar-internal.h         |  1
-rw-r--r--  net/rxrpc/call_object.c         |  9
-rw-r--r--  net/rxrpc/sendmsg.c             | 22
-rw-r--r--  net/sched/act_mirred.c          |  2
-rw-r--r--  net/sched/act_pedit.c           |  4
-rw-r--r--  net/sched/cls_api.c             |  1
-rw-r--r--  net/sched/cls_flower.c          |  9
20 files changed, 100 insertions, 49 deletions
diff --git a/net/9p/Kconfig b/net/9p/Kconfig
index deabbd376cb1..00ebce9e5a65 100644
--- a/net/9p/Kconfig
+++ b/net/9p/Kconfig
@@ -17,6 +17,8 @@ if NET_9P
config NET_9P_FD
default NET_9P
+ imply INET
+ imply UNIX
tristate "9P FD Transport"
help
This builds support for transports over TCP, Unix sockets and
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 2112146092bf..26a586007d8b 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -1758,7 +1758,7 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
{
int num_frags = skb_shinfo(skb)->nr_frags;
struct page *page, *head = NULL;
- int i, new_frags;
+ int i, order, psize, new_frags;
u32 d_off;
if (skb_shared(skb) || skb_unclone(skb, gfp_mask))
@@ -1767,9 +1767,17 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
if (!num_frags)
goto release;
- new_frags = (__skb_pagelen(skb) + PAGE_SIZE - 1) >> PAGE_SHIFT;
+ /* We might have to allocate high order pages, so compute what minimum
+ * page order is needed.
+ */
+ order = 0;
+ while ((PAGE_SIZE << order) * MAX_SKB_FRAGS < __skb_pagelen(skb))
+ order++;
+ psize = (PAGE_SIZE << order);
+
+ new_frags = (__skb_pagelen(skb) + psize - 1) >> (PAGE_SHIFT + order);
for (i = 0; i < new_frags; i++) {
- page = alloc_page(gfp_mask);
+ page = alloc_pages(gfp_mask | __GFP_COMP, order);
if (!page) {
while (head) {
struct page *next = (struct page *)page_private(head);
@@ -1796,11 +1804,11 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
vaddr = kmap_atomic(p);
while (done < p_len) {
- if (d_off == PAGE_SIZE) {
+ if (d_off == psize) {
d_off = 0;
page = (struct page *)page_private(page);
}
- copy = min_t(u32, PAGE_SIZE - d_off, p_len - done);
+ copy = min_t(u32, psize - d_off, p_len - done);
memcpy(page_address(page) + d_off,
vaddr + p_off + done, copy);
done += copy;
@@ -1816,7 +1824,7 @@ int skb_copy_ubufs(struct sk_buff *skb, gfp_t gfp_mask)
/* skb frags point to kernel buffers */
for (i = 0; i < new_frags - 1; i++) {
- __skb_fill_page_desc(skb, i, head, 0, PAGE_SIZE);
+ __skb_fill_page_desc(skb, i, head, 0, psize);
head = (struct page *)page_private(head);
}
__skb_fill_page_desc(skb, new_frags - 1, head, 0, d_off);
diff --git a/net/ethtool/ioctl.c b/net/ethtool/ioctl.c
index 59adc4e6e9ee..6bb778e10461 100644
--- a/net/ethtool/ioctl.c
+++ b/net/ethtool/ioctl.c
@@ -574,8 +574,8 @@ static int ethtool_get_link_ksettings(struct net_device *dev,
static int ethtool_set_link_ksettings(struct net_device *dev,
void __user *useraddr)
{
+ struct ethtool_link_ksettings link_ksettings = {};
int err;
- struct ethtool_link_ksettings link_ksettings;
ASSERT_RTNL();
diff --git a/net/ipv6/sit.c b/net/ipv6/sit.c
index 063560e2cb1a..cc24cefdb85c 100644
--- a/net/ipv6/sit.c
+++ b/net/ipv6/sit.c
@@ -1095,12 +1095,13 @@ tx_err:
static void ipip6_tunnel_bind_dev(struct net_device *dev)
{
+ struct ip_tunnel *tunnel = netdev_priv(dev);
+ int t_hlen = tunnel->hlen + sizeof(struct iphdr);
struct net_device *tdev = NULL;
- struct ip_tunnel *tunnel;
+ int hlen = LL_MAX_HEADER;
const struct iphdr *iph;
struct flowi4 fl4;
- tunnel = netdev_priv(dev);
iph = &tunnel->parms.iph;
if (iph->daddr) {
@@ -1123,14 +1124,15 @@ static void ipip6_tunnel_bind_dev(struct net_device *dev)
tdev = __dev_get_by_index(tunnel->net, tunnel->parms.link);
if (tdev && !netif_is_l3_master(tdev)) {
- int t_hlen = tunnel->hlen + sizeof(struct iphdr);
int mtu;
mtu = tdev->mtu - t_hlen;
if (mtu < IPV6_MIN_MTU)
mtu = IPV6_MIN_MTU;
WRITE_ONCE(dev->mtu, mtu);
+ hlen = tdev->hard_header_len + tdev->needed_headroom;
}
+ dev->needed_headroom = t_hlen + hlen;
}
static void ipip6_tunnel_update(struct ip_tunnel *t, struct ip_tunnel_parm *p,
diff --git a/net/ipv6/tcp_ipv6.c b/net/ipv6/tcp_ipv6.c
index 244cf86c4cbb..7132eb213a7a 100644
--- a/net/ipv6/tcp_ipv6.c
+++ b/net/ipv6/tcp_ipv6.c
@@ -1065,7 +1065,7 @@ static void tcp_v6_send_reset(const struct sock *sk, struct sk_buff *skb)
if (np->repflow)
label = ip6_flowlabel(ipv6h);
priority = sk->sk_priority;
- txhash = sk->sk_hash;
+ txhash = sk->sk_txhash;
}
if (sk->sk_state == TCP_TIME_WAIT) {
label = cpu_to_be32(inet_twsk(sk)->tw_flowlabel);
diff --git a/net/ncsi/ncsi-aen.c b/net/ncsi/ncsi-aen.c
index b635c194f0a8..62fb1031763d 100644
--- a/net/ncsi/ncsi-aen.c
+++ b/net/ncsi/ncsi-aen.c
@@ -165,6 +165,7 @@ static int ncsi_aen_handler_cr(struct ncsi_dev_priv *ndp,
nc->state = NCSI_CHANNEL_INACTIVE;
list_add_tail_rcu(&nc->link, &ndp->channel_queue);
spin_unlock_irqrestore(&ndp->lock, flags);
+ nc->modes[NCSI_MODE_TX_ENABLE].enable = 0;
return ncsi_process_next_channel(ndp);
}
diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c
index 09542951656c..59fb8320ab4d 100644
--- a/net/netfilter/nf_tables_api.c
+++ b/net/netfilter/nf_tables_api.c
@@ -2075,8 +2075,10 @@ static int nft_chain_parse_hook(struct net *net,
if (!basechain) {
if (!ha[NFTA_HOOK_HOOKNUM] ||
- !ha[NFTA_HOOK_PRIORITY])
- return -EINVAL;
+ !ha[NFTA_HOOK_PRIORITY]) {
+ NL_SET_BAD_ATTR(extack, nla[NFTA_CHAIN_NAME]);
+ return -ENOENT;
+ }
hook->num = ntohl(nla_get_be32(ha[NFTA_HOOK_HOOKNUM]));
hook->priority = ntohl(nla_get_be32(ha[NFTA_HOOK_PRIORITY]));
@@ -5125,12 +5127,24 @@ static void nf_tables_unbind_set(const struct nft_ctx *ctx, struct nft_set *set,
}
}
+void nf_tables_activate_set(const struct nft_ctx *ctx, struct nft_set *set)
+{
+ if (nft_set_is_anonymous(set))
+ nft_clear(ctx->net, set);
+
+ set->use++;
+}
+EXPORT_SYMBOL_GPL(nf_tables_activate_set);
+
void nf_tables_deactivate_set(const struct nft_ctx *ctx, struct nft_set *set,
struct nft_set_binding *binding,
enum nft_trans_phase phase)
{
switch (phase) {
case NFT_TRANS_PREPARE:
+ if (nft_set_is_anonymous(set))
+ nft_deactivate_next(ctx->net, set);
+
set->use--;
return;
case NFT_TRANS_ABORT:
@@ -7693,7 +7707,7 @@ static const struct nla_policy nft_flowtable_hook_policy[NFTA_FLOWTABLE_HOOK_MAX
};
static int nft_flowtable_parse_hook(const struct nft_ctx *ctx,
- const struct nlattr *attr,
+ const struct nlattr * const nla[],
struct nft_flowtable_hook *flowtable_hook,
struct nft_flowtable *flowtable,
struct netlink_ext_ack *extack, bool add)
@@ -7705,15 +7719,18 @@ static int nft_flowtable_parse_hook(const struct nft_ctx *ctx,
INIT_LIST_HEAD(&flowtable_hook->list);
- err = nla_parse_nested_deprecated(tb, NFTA_FLOWTABLE_HOOK_MAX, attr,
+ err = nla_parse_nested_deprecated(tb, NFTA_FLOWTABLE_HOOK_MAX,
+ nla[NFTA_FLOWTABLE_HOOK],
nft_flowtable_hook_policy, NULL);
if (err < 0)
return err;
if (add) {
if (!tb[NFTA_FLOWTABLE_HOOK_NUM] ||
- !tb[NFTA_FLOWTABLE_HOOK_PRIORITY])
- return -EINVAL;
+ !tb[NFTA_FLOWTABLE_HOOK_PRIORITY]) {
+ NL_SET_BAD_ATTR(extack, nla[NFTA_FLOWTABLE_NAME]);
+ return -ENOENT;
+ }
hooknum = ntohl(nla_get_be32(tb[NFTA_FLOWTABLE_HOOK_NUM]));
if (hooknum != NF_NETDEV_INGRESS)
@@ -7898,8 +7915,8 @@ static int nft_flowtable_update(struct nft_ctx *ctx, const struct nlmsghdr *nlh,
u32 flags;
int err;
- err = nft_flowtable_parse_hook(ctx, nla[NFTA_FLOWTABLE_HOOK],
- &flowtable_hook, flowtable, extack, false);
+ err = nft_flowtable_parse_hook(ctx, nla, &flowtable_hook, flowtable,
+ extack, false);
if (err < 0)
return err;
@@ -8044,8 +8061,8 @@ static int nf_tables_newflowtable(struct sk_buff *skb,
if (err < 0)
goto err3;
- err = nft_flowtable_parse_hook(&ctx, nla[NFTA_FLOWTABLE_HOOK],
- &flowtable_hook, flowtable, extack, true);
+ err = nft_flowtable_parse_hook(&ctx, nla, &flowtable_hook, flowtable,
+ extack, true);
if (err < 0)
goto err4;
@@ -8107,8 +8124,8 @@ static int nft_delflowtable_hook(struct nft_ctx *ctx,
struct nft_trans *trans;
int err;
- err = nft_flowtable_parse_hook(ctx, nla[NFTA_FLOWTABLE_HOOK],
- &flowtable_hook, flowtable, extack, false);
+ err = nft_flowtable_parse_hook(ctx, nla, &flowtable_hook, flowtable,
+ extack, false);
if (err < 0)
return err;
diff --git a/net/netfilter/nft_ct_fast.c b/net/netfilter/nft_ct_fast.c
index 89983b0613fa..e684c8a91848 100644
--- a/net/netfilter/nft_ct_fast.c
+++ b/net/netfilter/nft_ct_fast.c
@@ -15,10 +15,6 @@ void nft_ct_get_fast_eval(const struct nft_expr *expr,
unsigned int state;
ct = nf_ct_get(pkt->skb, &ctinfo);
- if (!ct) {
- regs->verdict.code = NFT_BREAK;
- return;
- }
switch (priv->key) {
case NFT_CT_STATE:
@@ -30,6 +26,16 @@ void nft_ct_get_fast_eval(const struct nft_expr *expr,
state = NF_CT_STATE_INVALID_BIT;
*dest = state;
return;
+ default:
+ break;
+ }
+
+ if (!ct) {
+ regs->verdict.code = NFT_BREAK;
+ return;
+ }
+
+ switch (priv->key) {
case NFT_CT_DIRECTION:
nft_reg_store8(dest, CTINFO2DIR(ctinfo));
return;
diff --git a/net/netfilter/nft_dynset.c b/net/netfilter/nft_dynset.c
index 274579b1696e..bd19c7aec92e 100644
--- a/net/netfilter/nft_dynset.c
+++ b/net/netfilter/nft_dynset.c
@@ -342,7 +342,7 @@ static void nft_dynset_activate(const struct nft_ctx *ctx,
{
struct nft_dynset *priv = nft_expr_priv(expr);
- priv->set->use++;
+ nf_tables_activate_set(ctx, priv->set);
}
static void nft_dynset_destroy(const struct nft_ctx *ctx,
diff --git a/net/netfilter/nft_lookup.c b/net/netfilter/nft_lookup.c
index cecf8ab90e58..03ef4fdaa460 100644
--- a/net/netfilter/nft_lookup.c
+++ b/net/netfilter/nft_lookup.c
@@ -167,7 +167,7 @@ static void nft_lookup_activate(const struct nft_ctx *ctx,
{
struct nft_lookup *priv = nft_expr_priv(expr);
- priv->set->use++;
+ nf_tables_activate_set(ctx, priv->set);
}
static void nft_lookup_destroy(const struct nft_ctx *ctx,
diff --git a/net/netfilter/nft_objref.c b/net/netfilter/nft_objref.c
index cb37169608ba..a48dd5b5d45b 100644
--- a/net/netfilter/nft_objref.c
+++ b/net/netfilter/nft_objref.c
@@ -185,7 +185,7 @@ static void nft_objref_map_activate(const struct nft_ctx *ctx,
{
struct nft_objref_map *priv = nft_expr_priv(expr);
- priv->set->use++;
+ nf_tables_activate_set(ctx, priv->set);
}
static void nft_objref_map_destroy(const struct nft_ctx *ctx,
diff --git a/net/packet/af_packet.c b/net/packet/af_packet.c
index 6080c0db0814..640d94e34635 100644
--- a/net/packet/af_packet.c
+++ b/net/packet/af_packet.c
@@ -2033,7 +2033,7 @@ retry:
goto retry;
}
- if (!dev_validate_header(dev, skb->data, len)) {
+ if (!dev_validate_header(dev, skb->data, len) || !skb->len) {
err = -EINVAL;
goto out_unlock;
}
diff --git a/net/rxrpc/af_rxrpc.c b/net/rxrpc/af_rxrpc.c
index c32b164206f9..31f738d65f1c 100644
--- a/net/rxrpc/af_rxrpc.c
+++ b/net/rxrpc/af_rxrpc.c
@@ -265,6 +265,7 @@ static int rxrpc_listen(struct socket *sock, int backlog)
* @key: The security context to use (defaults to socket setting)
* @user_call_ID: The ID to use
* @tx_total_len: Total length of data to transmit during the call (or -1)
+ * @hard_timeout: The maximum lifespan of the call in sec
* @gfp: The allocation constraints
* @notify_rx: Where to send notifications instead of socket queue
* @upgrade: Request service upgrade for call
@@ -283,6 +284,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
struct key *key,
unsigned long user_call_ID,
s64 tx_total_len,
+ u32 hard_timeout,
gfp_t gfp,
rxrpc_notify_rx_t notify_rx,
bool upgrade,
@@ -313,6 +315,7 @@ struct rxrpc_call *rxrpc_kernel_begin_call(struct socket *sock,
p.tx_total_len = tx_total_len;
p.interruptibility = interruptibility;
p.kernel = true;
+ p.timeouts.hard = hard_timeout;
memset(&cp, 0, sizeof(cp));
cp.local = rx->local;
diff --git a/net/rxrpc/ar-internal.h b/net/rxrpc/ar-internal.h
index 67b0a894162d..5d44dc08f66d 100644
--- a/net/rxrpc/ar-internal.h
+++ b/net/rxrpc/ar-internal.h
@@ -616,6 +616,7 @@ struct rxrpc_call {
unsigned long expect_term_by; /* When we expect call termination by */
u32 next_rx_timo; /* Timeout for next Rx packet (jif) */
u32 next_req_timo; /* Timeout for next Rx request packet (jif) */
+ u32 hard_timo; /* Maximum lifetime or 0 (jif) */
struct timer_list timer; /* Combined event timer */
struct work_struct destroyer; /* In-process-context destroyer */
rxrpc_notify_rx_t notify_rx; /* kernel service Rx notification function */
diff --git a/net/rxrpc/call_object.c b/net/rxrpc/call_object.c
index 3e5cc70884dd..773eecd1e979 100644
--- a/net/rxrpc/call_object.c
+++ b/net/rxrpc/call_object.c
@@ -224,6 +224,13 @@ static struct rxrpc_call *rxrpc_alloc_client_call(struct rxrpc_sock *rx,
if (cp->exclusive)
__set_bit(RXRPC_CALL_EXCLUSIVE, &call->flags);
+ if (p->timeouts.normal)
+ call->next_rx_timo = min(msecs_to_jiffies(p->timeouts.normal), 1UL);
+ if (p->timeouts.idle)
+ call->next_req_timo = min(msecs_to_jiffies(p->timeouts.idle), 1UL);
+ if (p->timeouts.hard)
+ call->hard_timo = p->timeouts.hard * HZ;
+
ret = rxrpc_init_client_call_security(call);
if (ret < 0) {
rxrpc_prefail_call(call, RXRPC_CALL_LOCAL_ERROR, ret);
@@ -255,7 +262,7 @@ void rxrpc_start_call_timer(struct rxrpc_call *call)
call->keepalive_at = j;
call->expect_rx_by = j;
call->expect_req_by = j;
- call->expect_term_by = j;
+ call->expect_term_by = j + call->hard_timo;
call->timer.expires = now;
}
diff --git a/net/rxrpc/sendmsg.c b/net/rxrpc/sendmsg.c
index da49fcf1c456..8e0b94714e84 100644
--- a/net/rxrpc/sendmsg.c
+++ b/net/rxrpc/sendmsg.c
@@ -50,15 +50,11 @@ static int rxrpc_wait_to_be_connected(struct rxrpc_call *call, long *timeo)
_enter("%d", call->debug_id);
if (rxrpc_call_state(call) != RXRPC_CALL_CLIENT_AWAIT_CONN)
- return call->error;
+ goto no_wait;
add_wait_queue_exclusive(&call->waitq, &myself);
for (;;) {
- ret = call->error;
- if (ret < 0)
- break;
-
switch (call->interruptibility) {
case RXRPC_INTERRUPTIBLE:
case RXRPC_PREINTERRUPTIBLE:
@@ -69,10 +65,9 @@ static int rxrpc_wait_to_be_connected(struct rxrpc_call *call, long *timeo)
set_current_state(TASK_UNINTERRUPTIBLE);
break;
}
- if (rxrpc_call_state(call) != RXRPC_CALL_CLIENT_AWAIT_CONN) {
- ret = call->error;
+
+ if (rxrpc_call_state(call) != RXRPC_CALL_CLIENT_AWAIT_CONN)
break;
- }
if ((call->interruptibility == RXRPC_INTERRUPTIBLE ||
call->interruptibility == RXRPC_PREINTERRUPTIBLE) &&
signal_pending(current)) {
@@ -85,6 +80,7 @@ static int rxrpc_wait_to_be_connected(struct rxrpc_call *call, long *timeo)
remove_wait_queue(&call->waitq, &myself);
__set_current_state(TASK_RUNNING);
+no_wait:
if (ret == 0 && rxrpc_call_is_complete(call))
ret = call->error;
@@ -655,15 +651,19 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
if (IS_ERR(call))
return PTR_ERR(call);
/* ... and we have the call lock. */
+ p.call.nr_timeouts = 0;
ret = 0;
if (rxrpc_call_is_complete(call))
goto out_put_unlock;
} else {
switch (rxrpc_call_state(call)) {
- case RXRPC_CALL_UNINITIALISED:
case RXRPC_CALL_CLIENT_AWAIT_CONN:
- case RXRPC_CALL_SERVER_PREALLOC:
case RXRPC_CALL_SERVER_SECURING:
+ if (p.command == RXRPC_CMD_SEND_ABORT)
+ break;
+ fallthrough;
+ case RXRPC_CALL_UNINITIALISED:
+ case RXRPC_CALL_SERVER_PREALLOC:
rxrpc_put_call(call, rxrpc_call_put_sendmsg);
ret = -EBUSY;
goto error_release_sock;
@@ -703,7 +703,7 @@ int rxrpc_do_sendmsg(struct rxrpc_sock *rx, struct msghdr *msg, size_t len)
fallthrough;
case 1:
if (p.call.timeouts.hard > 0) {
- j = msecs_to_jiffies(p.call.timeouts.hard);
+ j = p.call.timeouts.hard * HZ;
now = jiffies;
j += now;
WRITE_ONCE(call->expect_term_by, j);
diff --git a/net/sched/act_mirred.c b/net/sched/act_mirred.c
index ec43764e92e7..0a711c184c29 100644
--- a/net/sched/act_mirred.c
+++ b/net/sched/act_mirred.c
@@ -264,7 +264,7 @@ TC_INDIRECT_SCOPE int tcf_mirred_act(struct sk_buff *skb,
goto out;
}
- if (unlikely(!(dev->flags & IFF_UP))) {
+ if (unlikely(!(dev->flags & IFF_UP)) || !netif_carrier_ok(dev)) {
net_notice_ratelimited("tc mirred to Houston: device %s is down\n",
dev->name);
goto out;
diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c
index fb93d4c1faca..fc945c7e4123 100644
--- a/net/sched/act_pedit.c
+++ b/net/sched/act_pedit.c
@@ -258,7 +258,7 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
if (!offmask && cur % 4) {
NL_SET_ERR_MSG_MOD(extack, "Offsets must be on 32bit boundaries");
ret = -EINVAL;
- goto put_chain;
+ goto out_free_keys;
}
/* sanitize the shift value for any later use */
@@ -291,6 +291,8 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla,
return ret;
+out_free_keys:
+ kfree(nparms->tcfp_keys);
put_chain:
if (goto_ch)
tcf_chain_put_by_act(goto_ch);
diff --git a/net/sched/cls_api.c b/net/sched/cls_api.c
index 3c3629c9e7b6..2621550bfddc 100644
--- a/net/sched/cls_api.c
+++ b/net/sched/cls_api.c
@@ -1589,6 +1589,7 @@ static int tcf_block_bind(struct tcf_block *block,
err_unroll:
list_for_each_entry_safe(block_cb, next, &bo->cb_list, list) {
+ list_del(&block_cb->driver_list);
if (i-- > 0) {
list_del(&block_cb->list);
tcf_block_playback_offloads(block, block_cb->cb,
diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c
index cc49256d5318..9dbc43388e57 100644
--- a/net/sched/cls_flower.c
+++ b/net/sched/cls_flower.c
@@ -2210,10 +2210,10 @@ static int fl_change(struct net *net, struct sk_buff *in_skb,
spin_lock(&tp->lock);
if (!handle) {
handle = 1;
- err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
+ err = idr_alloc_u32(&head->handle_idr, NULL, &handle,
INT_MAX, GFP_ATOMIC);
} else {
- err = idr_alloc_u32(&head->handle_idr, fnew, &handle,
+ err = idr_alloc_u32(&head->handle_idr, NULL, &handle,
handle, GFP_ATOMIC);
/* Filter with specified handle was concurrently
@@ -2339,7 +2339,8 @@ errout_hw:
errout_mask:
fl_mask_put(head, fnew->mask);
errout_idr:
- idr_remove(&head->handle_idr, fnew->handle);
+ if (!fold)
+ idr_remove(&head->handle_idr, fnew->handle);
__fl_put(fnew);
errout_tb:
kfree(tb);
@@ -2378,7 +2379,7 @@ static void fl_walk(struct tcf_proto *tp, struct tcf_walker *arg,
rcu_read_lock();
idr_for_each_entry_continue_ul(&head->handle_idr, f, tmp, id) {
/* don't return filters that are being deleted */
- if (!refcount_inc_not_zero(&f->refcnt))
+ if (!f || !refcount_inc_not_zero(&f->refcnt))
continue;
rcu_read_unlock();