author    Tony Zelenoff <antonz@parallels.com>    2012-02-22 10:48:01 +0400
committer Pablo Neira Ayuso <pablo@netfilter.org> 2012-03-07 17:40:53 +0100
commit    58020f77612747271ffa13e63cbff6ba12f49c2e
tree      8b209c15462d11695e2946142ee3bee75e7ea6df /net/netfilter
parent    93326ae31222ddb8f4835e0461d3eb9959482ffd
netfilter: nf_ct_ecache: refactor nf_ct_deliver_cached_events
* Indentation lowered
* Some CPU cycles saved by delaying initialization of the item variable

Signed-off-by: Tony Zelenoff <antonz@parallels.com>
Signed-off-by: Pablo Neira Ayuso <pablo@netfilter.org>
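To illustrate the two points above outside of kernel context, here is a minimal stand-alone C sketch of the same pattern; the names (deliver_cached, struct event_item, deliver) are made up for this example and do not appear in the patch:

	#include <stdbool.h>
	#include <stdio.h>

	struct event_item {
		int ct_id;
		int report;
	};

	static void deliver(unsigned long events, const struct event_item *item)
	{
		printf("delivering events 0x%lx for ct %d\n", events, item->ct_id);
	}

	static void deliver_cached(bool confirmed, bool dying, unsigned long events)
	{
		struct event_item item;

		/* Inverted guard + early return instead of nesting the whole
		 * delivery path inside "if (confirmed && !dying && events) { ... }". */
		if (!confirmed || dying || !events)
			return;

		/* Delayed initialization: 'item' is only filled in on the path
		 * that actually delivers something. */
		item.ct_id = 1;
		item.report = 0;

		deliver(events, &item);
	}

	int main(void)
	{
		deliver_cached(true, false, 0x3);   /* delivers */
		deliver_cached(false, false, 0x3);  /* early return, no work done */
		return 0;
	}

The patch below applies this same shape to nf_ct_deliver_cached_events().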
Diffstat (limited to 'net/netfilter')
 -rw-r--r--  net/netfilter/nf_conntrack_ecache.c | 55
 1 file changed, 29 insertions(+), 26 deletions(-)
diff --git a/net/netfilter/nf_conntrack_ecache.c b/net/netfilter/nf_conntrack_ecache.c
index aa15977b1781..5bd3047ddeec 100644
--- a/net/netfilter/nf_conntrack_ecache.c
+++ b/net/netfilter/nf_conntrack_ecache.c
@@ -32,9 +32,11 @@ static DEFINE_MUTEX(nf_ct_ecache_mutex);
 void nf_ct_deliver_cached_events(struct nf_conn *ct)
 {
 	struct net *net = nf_ct_net(ct);
-	unsigned long events;
+	unsigned long events, missed;
 	struct nf_ct_event_notifier *notify;
 	struct nf_conntrack_ecache *e;
+	struct nf_ct_event item;
+	int ret;
 
 	rcu_read_lock();
 	notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
@@ -47,31 +49,32 @@ void nf_ct_deliver_cached_events(struct nf_conn *ct)
 
 	events = xchg(&e->cache, 0);
 
-	if (nf_ct_is_confirmed(ct) && !nf_ct_is_dying(ct) && events) {
-		struct nf_ct_event item = {
-			.ct = ct,
-			.pid = 0,
-			.report = 0
-		};
-		int ret;
-		/* We make a copy of the missed event cache without taking
-		 * the lock, thus we may send missed events twice. However,
-		 * this does not harm and it happens very rarely. */
-		unsigned long missed = e->missed;
-
-		if (!((events | missed) & e->ctmask))
-			goto out_unlock;
-
-		ret = notify->fcn(events | missed, &item);
-		if (unlikely(ret < 0 || missed)) {
-			spin_lock_bh(&ct->lock);
-			if (ret < 0)
-				e->missed |= events;
-			else
-				e->missed &= ~missed;
-			spin_unlock_bh(&ct->lock);
-		}
-	}
+	if (!nf_ct_is_confirmed(ct) || nf_ct_is_dying(ct) || !events)
+		goto out_unlock;
+
+	/* We make a copy of the missed event cache without taking
+	 * the lock, thus we may send missed events twice. However,
+	 * this does not harm and it happens very rarely. */
+	missed = e->missed;
+
+	if (!((events | missed) & e->ctmask))
+		goto out_unlock;
+
+	item.ct = ct;
+	item.pid = 0;
+	item.report = 0;
+
+	ret = notify->fcn(events | missed, &item);
+
+	if (likely(ret >= 0 && !missed))
+		goto out_unlock;
+
+	spin_lock_bh(&ct->lock);
+	if (ret < 0)
+		e->missed |= events;
+	else
+		e->missed &= ~missed;
+	spin_unlock_bh(&ct->lock);
 
 out_unlock:
 	rcu_read_unlock();
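For readability, here is a sketch of how nf_ct_deliver_cached_events() reads with this patch applied, reconstructed only from the hunks above; the notifier/ecache lookup code between the two hunks is not part of this diff and is elided with a comment:

	void nf_ct_deliver_cached_events(struct nf_conn *ct)
	{
		struct net *net = nf_ct_net(ct);
		unsigned long events, missed;
		struct nf_ct_event_notifier *notify;
		struct nf_conntrack_ecache *e;
		struct nf_ct_event item;
		int ret;

		rcu_read_lock();
		notify = rcu_dereference(net->ct.nf_conntrack_event_cb);
		/* ... notifier/ecache lookup checks, outside the hunks above ... */

		events = xchg(&e->cache, 0);

		/* Early exit replaces the old enclosing if-block. */
		if (!nf_ct_is_confirmed(ct) || nf_ct_is_dying(ct) || !events)
			goto out_unlock;

		/* We make a copy of the missed event cache without taking
		 * the lock, thus we may send missed events twice. However,
		 * this does not harm and it happens very rarely. */
		missed = e->missed;

		if (!((events | missed) & e->ctmask))
			goto out_unlock;

		/* 'item' is now initialized only when an event will be sent. */
		item.ct = ct;
		item.pid = 0;
		item.report = 0;

		ret = notify->fcn(events | missed, &item);

		if (likely(ret >= 0 && !missed))
			goto out_unlock;

		spin_lock_bh(&ct->lock);
		if (ret < 0)
			e->missed |= events;	/* delivery failed: keep events for later */
		else
			e->missed &= ~missed;	/* missed events re-sent: clear them */
		spin_unlock_bh(&ct->lock);

	out_unlock:
		rcu_read_unlock();
	}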