author    John Fastabend <john.fastabend@gmail.com>  2017-12-07 09:54:25 -0800
committer David S. Miller <davem@davemloft.net>      2017-12-08 13:32:25 -0500
commit    6b3ba9146fe64b9bebb6346c9dcfe3b4851de2d7
tree      9acd6f0cac4ac67578c21c74d29a3684c15dbe32 /net/core
parent    6c148184b5c868ad2c8a5a4a777cd8097622368a
net: sched: allow qdiscs to handle locking
This patch adds a flag for queueing disciplines to indicate that the stack
does not need to take the qdisc lock to protect operations. This can
be used to build lockless scheduling algorithms and improve
performance.
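
[Editorial sketch, not part of the patch: a qdisc whose enqueue/dequeue
paths are already safe to run without the root lock could opt in by
setting the flag at init time. The qdisc name and init callback below
are hypothetical.]

#include <net/netlink.h>	/* struct nlattr */
#include <net/sch_generic.h>	/* struct Qdisc, TCQ_F_NOLOCK */

/* Hypothetical qdisc opting out of the root qdisc lock. This only
 * makes sense if its ->enqueue()/->dequeue() are safe to run
 * concurrently (e.g. built on per-cpu or lock-free queues), since the
 * stack will no longer serialize callers with qdisc_lock().
 */
static int lockless_qdisc_init(struct Qdisc *sch, struct nlattr *opt)
{
	sch->flags |= TCQ_F_NOLOCK;	/* tx path now skips qdisc_lock(sch) */
	return 0;
}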
The flag is checked in the tx path, and the qdisc lock is only taken
if it is not set. For now this is a plain conditional; later, if it
proves worthwhile, we could be more aggressive and use a static key
or wrap the check in likely().
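
[A sketch of that possible follow-up, not part of this patch; the key
name tcq_nolock_in_use is made up here. A static key would patch the
check out entirely on hosts that never attach a lockless qdisc.]

#include <linux/jump_label.h>	/* static keys */

/* Hypothetical: default-off key, enabled the first time a
 * TCQ_F_NOLOCK qdisc is attached (e.g. via static_branch_inc()).
 */
DEFINE_STATIC_KEY_FALSE(tcq_nolock_in_use);

/* In __dev_xmit_skb(), replacing the plain flag test: the branch
 * costs nothing until the key is enabled.
 */
	if (static_branch_unlikely(&tcq_nolock_in_use) &&
	    (q->flags & TCQ_F_NOLOCK)) {
		/* ... lockless enqueue path from the hunk below ... */
	}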
The lockless case also drops the TCQ_F_CAN_BYPASS logic. The reason
is that, when tested with pktgen, synchronizing a qlen counter across
threads proves to cost more than simply doing the enqueue/dequeue
operations.
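
[For context, a sketch assuming per-cpu queue stats, as a lockless
qdisc would typically use; the helper name is hypothetical. An exact
qlen for the bypass test would mean either a shared counter bumped
atomically on every packet, or a sum over every CPU on each check:]

#include <linux/cpumask.h>	/* for_each_possible_cpu() */
#include <net/sch_generic.h>

/* Sketch: the bypass test "qlen == 0" stops being one cheap read once
 * queue state is per-cpu; it becomes a walk over all possible CPUs
 * (or an atomic counter touched on every enqueue/dequeue). That is
 * the synchronization cost the pktgen numbers above refer to.
 */
static inline int nolock_qdisc_qlen(const struct Qdisc *q)
{
	int cpu, qlen = 0;

	for_each_possible_cpu(cpu)
		qlen += per_cpu_ptr(q->cpu_qstats, cpu)->qlen;
	return qlen;
}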
Signed-off-by: John Fastabend <john.fastabend@gmail.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
Diffstat (limited to 'net/core')
-rw-r--r--  net/core/dev.c | 26 ++++++++++++++++++++++----
1 file changed, 22 insertions(+), 4 deletions(-)
diff --git a/net/core/dev.c b/net/core/dev.c
index 44c7de365f55..e32cf5c7f200 100644
--- a/net/core/dev.c
+++ b/net/core/dev.c
@@ -3162,6 +3162,21 @@ static inline int __dev_xmit_skb(struct sk_buff *skb, struct Qdisc *q,
 	int rc;
 
 	qdisc_calculate_pkt_len(skb, q);
+
+	if (q->flags & TCQ_F_NOLOCK) {
+		if (unlikely(test_bit(__QDISC_STATE_DEACTIVATED, &q->state))) {
+			__qdisc_drop(skb, &to_free);
+			rc = NET_XMIT_DROP;
+		} else {
+			rc = q->enqueue(skb, q, &to_free) & NET_XMIT_MASK;
+			__qdisc_run(q);
+		}
+
+		if (unlikely(to_free))
+			kfree_skb_list(to_free);
+		return rc;
+	}
+
 	/*
 	 * Heuristic to force contended enqueues to serialize on a
 	 * separate lock before trying to get qdisc main lock.
@@ -4144,19 +4159,22 @@ static __latent_entropy void net_tx_action(struct softirq_action *h)
 
 		while (head) {
 			struct Qdisc *q = head;
-			spinlock_t *root_lock;
+			spinlock_t *root_lock = NULL;
 
 			head = head->next_sched;
 
-			root_lock = qdisc_lock(q);
-			spin_lock(root_lock);
+			if (!(q->flags & TCQ_F_NOLOCK)) {
+				root_lock = qdisc_lock(q);
+				spin_lock(root_lock);
+			}
 			/* We need to make sure head->next_sched is read
 			 * before clearing __QDISC_STATE_SCHED
 			 */
 			smp_mb__before_atomic();
 			clear_bit(__QDISC_STATE_SCHED, &q->state);
 			qdisc_run(q);
-			spin_unlock(root_lock);
+			if (root_lock)
+				spin_unlock(root_lock);
 		}
 	}
 }