-rw-r--r--  include/net/inetpeer.h | 17
-rw-r--r--  net/ipv4/inetpeer.c    | 29
2 files changed, 24 insertions(+), 22 deletions(-)
diff --git a/include/net/inetpeer.h b/include/net/inetpeer.h
index 925573fd2aed..f13cc0c2b163 100644
--- a/include/net/inetpeer.h
+++ b/include/net/inetpeer.h
@@ -19,7 +19,7 @@ struct inet_peer
{
struct inet_peer *avl_left, *avl_right;
struct inet_peer *unused_next, **unused_prevp;
- unsigned long dtime; /* the time of last use of not
+ __u32 dtime; /* the time of last use of not
* referenced entries */
atomic_t refcnt;
__be32 v4daddr; /* peer's address */
@@ -35,21 +35,8 @@ void inet_initpeers(void) __init;
/* can be called with or without local BH being disabled */
struct inet_peer *inet_getpeer(__be32 daddr, int create);
-extern spinlock_t inet_peer_unused_lock;
-extern struct inet_peer **inet_peer_unused_tailp;
/* can be called from BH context or outside */
-static inline void inet_putpeer(struct inet_peer *p)
-{
- spin_lock_bh(&inet_peer_unused_lock);
- if (atomic_dec_and_test(&p->refcnt)) {
- p->unused_prevp = inet_peer_unused_tailp;
- p->unused_next = NULL;
- *inet_peer_unused_tailp = p;
- inet_peer_unused_tailp = &p->unused_next;
- p->dtime = jiffies;
- }
- spin_unlock_bh(&inet_peer_unused_lock);
-}
+extern void inet_putpeer(struct inet_peer *p);
extern spinlock_t inet_peer_idlock;
/* can be called with or without local BH being disabled */
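[Note: un-inlining inet_putpeer() means callers no longer touch inet_peer_unused_lock or inet_peer_unused_tailp directly, so both can become static in net/ipv4/inetpeer.c, as the first hunk below shows. A hypothetical caller sketch, using only the API declared above (example_touch_peer is an illustration, not part of the patch):

#include <net/inetpeer.h>

/* Hypothetical caller (kernel context assumed): take a reference with
 * inet_getpeer(), drop it with the now out-of-line inet_putpeer(); at
 * refcnt zero the entry joins the unused list with dtime stamped. */
static void example_touch_peer(__be32 daddr)
{
	struct inet_peer *peer = inet_getpeer(daddr, 1);	/* create if absent */

	if (peer != NULL) {
		/* ... read or update per-peer state ... */
		inet_putpeer(peer);
	}
}
]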
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index 2b1a54b59c48..f072f3875af8 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -94,10 +94,8 @@ int inet_peer_minttl = 120 * HZ; /* TTL under high load: 120 sec */
int inet_peer_maxttl = 10 * 60 * HZ; /* usual time to live: 10 min */
static struct inet_peer *inet_peer_unused_head;
-/* Exported for inet_putpeer inline function. */
-struct inet_peer **inet_peer_unused_tailp = &inet_peer_unused_head;
-DEFINE_SPINLOCK(inet_peer_unused_lock);
-#define PEER_MAX_CLEANUP_WORK 30
+static struct inet_peer **inet_peer_unused_tailp = &inet_peer_unused_head;
+static DEFINE_SPINLOCK(inet_peer_unused_lock);
static void peer_check_expire(unsigned long dummy);
static DEFINE_TIMER(peer_periodic_timer, peer_check_expire, 0, 0);
@@ -340,7 +338,8 @@ static int cleanup_once(unsigned long ttl)
spin_lock_bh(&inet_peer_unused_lock);
p = inet_peer_unused_head;
if (p != NULL) {
- if (time_after(p->dtime + ttl, jiffies)) {
+ __u32 delta = (__u32)jiffies - p->dtime;
+ if (delta < ttl) {
/* Do not prune fresh entries. */
spin_unlock_bh(&inet_peer_unused_lock);
return -1;
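[Note: shrinking dtime to __u32 works because the new test compares an elapsed delta rather than absolute times: (__u32)jiffies - p->dtime is computed modulo 2^32, so it stays correct when jiffies wraps, where the old p->dtime + ttl form had to lean on time_after(). A minimal userspace sketch of the idiom (expired() and the sample values are illustrative, not kernel code):

#include <stdio.h>
#include <stdint.h>

/* Unsigned subtraction gives the true tick distance even across a wrap. */
static int expired(uint32_t now, uint32_t dtime, uint32_t ttl)
{
	uint32_t delta = now - dtime;	/* well-defined modulo 2^32 */

	return delta >= ttl;
}

int main(void)
{
	uint32_t dtime = 0xfffffff0u;	/* stamped just before the wrap */
	uint32_t now   = 0x00000010u;	/* 32 ticks later, after the wrap */

	/* prints: delta=32 ttl=100 -> 0, ttl=10 -> 1 */
	printf("delta=%u ttl=100 -> %d, ttl=10 -> %d\n",
	       now - dtime, expired(now, dtime, 100), expired(now, dtime, 10));
	return 0;
}
]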
@@ -432,7 +431,7 @@ out_free:
/* Called with local BH disabled. */
static void peer_check_expire(unsigned long dummy)
{
- int i;
+ unsigned long now = jiffies;
int ttl;
if (peer_total >= inet_peer_threshold)
@@ -441,7 +440,10 @@ static void peer_check_expire(unsigned long dummy)
ttl = inet_peer_maxttl
- (inet_peer_maxttl - inet_peer_minttl) / HZ *
peer_total / inet_peer_threshold * HZ;
- for (i = 0; i < PEER_MAX_CLEANUP_WORK && !cleanup_once(ttl); i++);
+ while (!cleanup_once(ttl)) {
+ if (jiffies != now)
+ break;
+ }
/* Trigger the timer after inet_peer_gc_mintime .. inet_peer_gc_maxtime
* interval depending on the total number of entries (more entries,
@@ -455,3 +457,16 @@ static void peer_check_expire(unsigned long dummy)
peer_total / inet_peer_threshold * HZ;
add_timer(&peer_periodic_timer);
}
+
+void inet_putpeer(struct inet_peer *p)
+{
+ spin_lock_bh(&inet_peer_unused_lock);
+ if (atomic_dec_and_test(&p->refcnt)) {
+ p->unused_prevp = inet_peer_unused_tailp;
+ p->unused_next = NULL;
+ *inet_peer_unused_tailp = p;
+ inet_peer_unused_tailp = &p->unused_next;
+ p->dtime = (__u32)jiffies;
+ }
+ spin_unlock_bh(&inet_peer_unused_lock);
+}
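
[Note: the peer_check_expire() change replaces the fixed PEER_MAX_CLEANUP_WORK cap of 30 iterations with a time bound: keep calling cleanup_once() until it reports nothing left to do or jiffies advances past the value sampled at entry, i.e. spend at most one tick in the timer handler. A userspace sketch of the same pattern (do_one_unit and clock() as a tick source are stand-ins, not kernel code):

#include <stdio.h>
#include <time.h>

/* Stand-in work source: returns nonzero once nothing is left to clean. */
static long work_left = 1000000L;

static int do_one_unit(void)
{
	if (work_left == 0)
		return 1;
	work_left--;
	return 0;
}

int main(void)
{
	clock_t start = clock();	/* stand-in for the jiffies sample */
	long done = 0;

	while (!do_one_unit()) {
		done++;
		if (clock() != start)	/* one tick elapsed: stop and yield */
			break;
	}
	printf("processed %ld units in one tick\n", done);
	return 0;
}
]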