Diffstat (limited to 'net')
-rw-r--r--  net/appletalk/ddp.c      |   4
-rw-r--r--  net/core/drop_monitor.c  | 102
-rw-r--r--  net/core/filter.c        |   4
-rw-r--r--  net/core/neighbour.c     |  14
-rw-r--r--  net/core/skbuff.c        |   2
-rw-r--r--  net/ipv4/inetpeer.c      |  16
-rw-r--r--  net/ipv4/ip_forward.c    |   1
-rw-r--r--  net/ipv4/ipmr.c          |   1
-rw-r--r--  net/ipv6/ip6_fib.c       |   2
-rw-r--r--  net/ipv6/ip6_output.c    |   1
-rw-r--r--  net/ipv6/ip6mr.c         |   2
-rw-r--r--  net/l2tp/l2tp_eth.c      |   2
-rw-r--r--  net/l2tp/l2tp_ip.c       |   9
13 files changed, 69 insertions(+), 91 deletions(-)
diff --git a/net/appletalk/ddp.c b/net/appletalk/ddp.c
index 0301b328cf0f..86852963b7f7 100644
--- a/net/appletalk/ddp.c
+++ b/net/appletalk/ddp.c
@@ -1208,9 +1208,7 @@ static int atalk_connect(struct socket *sock, struct sockaddr *uaddr,
 	if (addr->sat_addr.s_node == ATADDR_BCAST &&
 	    !sock_flag(sk, SOCK_BROADCAST)) {
 #if 1
-		printk(KERN_WARNING "%s is broken and did not set "
-				    "SO_BROADCAST. It will break when 2.2 is "
-				    "released.\n",
+		pr_warn("atalk_connect: %s is broken and did not set SO_BROADCAST.\n",
 			current->comm);
 #else
 		return -EACCES;
diff --git a/net/core/drop_monitor.c b/net/core/drop_monitor.c
index ea5fb9fcc3f5..d23b6682f4e9 100644
--- a/net/core/drop_monitor.c
+++ b/net/core/drop_monitor.c
@@ -36,9 +36,6 @@
 #define TRACE_ON 1
 #define TRACE_OFF 0
 
-static void send_dm_alert(struct work_struct *unused);
-
-
 /*
  * Globals, our netlink socket pointer
  * and the work handle that will send up
@@ -48,11 +45,10 @@ static int trace_state = TRACE_OFF;
 static DEFINE_MUTEX(trace_state_mutex);
 
 struct per_cpu_dm_data {
-	struct work_struct dm_alert_work;
-	struct sk_buff __rcu *skb;
-	atomic_t dm_hit_count;
-	struct timer_list send_timer;
-	int cpu;
+	spinlock_t		lock;
+	struct sk_buff		*skb;
+	struct work_struct	dm_alert_work;
+	struct timer_list	send_timer;
 };
 
 struct dm_hw_stat_delta {
@@ -78,13 +74,13 @@ static int dm_delay = 1;
 static unsigned long dm_hw_check_delta = 2*HZ;
 static LIST_HEAD(hw_stats_list);
 
-static void reset_per_cpu_data(struct per_cpu_dm_data *data)
+static struct sk_buff *reset_per_cpu_data(struct per_cpu_dm_data *data)
 {
 	size_t al;
 	struct net_dm_alert_msg *msg;
 	struct nlattr *nla;
 	struct sk_buff *skb;
-	struct sk_buff *oskb = rcu_dereference_protected(data->skb, 1);
+	unsigned long flags;
 
 	al = sizeof(struct net_dm_alert_msg);
 	al += dm_hit_limit * sizeof(struct net_dm_drop_point);
@@ -99,65 +95,40 @@ static void reset_per_cpu_data(struct per_cpu_dm_data *data)
 				  sizeof(struct net_dm_alert_msg));
 		msg = nla_data(nla);
 		memset(msg, 0, al);
-	} else
-		schedule_work_on(data->cpu, &data->dm_alert_work);
-
-	/*
-	 * Don't need to lock this, since we are guaranteed to only
-	 * run this on a single cpu at a time.
-	 * Note also that we only update data->skb if the old and new skb
-	 * pointers don't match. This ensures that we don't continually call
-	 * synchornize_rcu if we repeatedly fail to alloc a new netlink message.
-	 */
-	if (skb != oskb) {
-		rcu_assign_pointer(data->skb, skb);
-
-		synchronize_rcu();
-
-		atomic_set(&data->dm_hit_count, dm_hit_limit);
+	} else {
+		mod_timer(&data->send_timer, jiffies + HZ / 10);
 	}
 
+	spin_lock_irqsave(&data->lock, flags);
+	swap(data->skb, skb);
+	spin_unlock_irqrestore(&data->lock, flags);
+
+	return skb;
 }
 
-static void send_dm_alert(struct work_struct *unused)
+static void send_dm_alert(struct work_struct *work)
 {
 	struct sk_buff *skb;
-	struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data);
+	struct per_cpu_dm_data *data;
 
-	WARN_ON_ONCE(data->cpu != smp_processor_id());
+	data = container_of(work, struct per_cpu_dm_data, dm_alert_work);
 
-	/*
-	 * Grab the skb we're about to send
-	 */
-	skb = rcu_dereference_protected(data->skb, 1);
-
-	/*
-	 * Replace it with a new one
-	 */
-	reset_per_cpu_data(data);
+	skb = reset_per_cpu_data(data);
 
-	/*
-	 * Ship it!
-	 */
 	if (skb)
 		genlmsg_multicast(skb, 0, NET_DM_GRP_ALERT, GFP_KERNEL);
-
-	put_cpu_var(dm_cpu_data);
 }
 
 /*
  * This is the timer function to delay the sending of an alert
  * in the event that more drops will arrive during the
- * hysteresis period.  Note that it operates under the timer interrupt
- * so we don't need to disable preemption here
+ * hysteresis period.
  */
-static void sched_send_work(unsigned long unused)
+static void sched_send_work(unsigned long _data)
 {
-	struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data);
-
-	schedule_work_on(smp_processor_id(), &data->dm_alert_work);
+	struct per_cpu_dm_data *data = (struct per_cpu_dm_data *)_data;
 
-	put_cpu_var(dm_cpu_data);
+	schedule_work(&data->dm_alert_work);
 }
 
 static void trace_drop_common(struct sk_buff *skb, void *location)
@@ -167,33 +138,28 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
 	struct nlattr *nla;
 	int i;
 	struct sk_buff *dskb;
-	struct per_cpu_dm_data *data = &get_cpu_var(dm_cpu_data);
-
+	struct per_cpu_dm_data *data;
+	unsigned long flags;
 
-	rcu_read_lock();
-	dskb = rcu_dereference(data->skb);
+	local_irq_save(flags);
+	data = &__get_cpu_var(dm_cpu_data);
+	spin_lock(&data->lock);
+	dskb = data->skb;
 
 	if (!dskb)
 		goto out;
 
-	if (!atomic_add_unless(&data->dm_hit_count, -1, 0)) {
-		/*
-		 * we're already at zero, discard this hit
-		 */
-		goto out;
-	}
-
 	nlh = (struct nlmsghdr *)dskb->data;
 	nla = genlmsg_data(nlmsg_data(nlh));
 	msg = nla_data(nla);
 	for (i = 0; i < msg->entries; i++) {
 		if (!memcmp(&location, msg->points[i].pc, sizeof(void *))) {
 			msg->points[i].count++;
-			atomic_inc(&data->dm_hit_count);
 			goto out;
 		}
 	}
-
+	if (msg->entries == dm_hit_limit)
+		goto out;
 	/*
 	 * We need to create a new entry
 	 */
@@ -205,13 +171,11 @@ static void trace_drop_common(struct sk_buff *skb, void *location)
 
 	if (!timer_pending(&data->send_timer)) {
 		data->send_timer.expires = jiffies + dm_delay * HZ;
-		add_timer_on(&data->send_timer, smp_processor_id());
+		add_timer(&data->send_timer);
 	}
 
 out:
-	rcu_read_unlock();
-	put_cpu_var(dm_cpu_data);
-	return;
+	spin_unlock_irqrestore(&data->lock, flags);
 }
 
 static void trace_kfree_skb_hit(void *ignore, struct sk_buff *skb, void *location)
@@ -418,11 +382,11 @@ static int __init init_net_drop_monitor(void)
 
 	for_each_possible_cpu(cpu) {
 		data = &per_cpu(dm_cpu_data, cpu);
-		data->cpu = cpu;
 		INIT_WORK(&data->dm_alert_work, send_dm_alert);
 		init_timer(&data->send_timer);
-		data->send_timer.data = cpu;
+		data->send_timer.data = (unsigned long)data;
 		data->send_timer.function = sched_send_work;
+		spin_lock_init(&data->lock);
 		reset_per_cpu_data(data);
 	}
 
diff --git a/net/core/filter.c b/net/core/filter.c
index a3eddb515d1b..d4ce2dc712e3 100644
--- a/net/core/filter.c
+++ b/net/core/filter.c
@@ -616,9 +616,9 @@ static int __sk_prepare_filter(struct sk_filter *fp)
 /**
  *	sk_unattached_filter_create - create an unattached filter
  *	@fprog: the filter program
- *	@sk: the socket to use
+ *	@pfp: the unattached filter that is created
  *
- *	Create a filter independent ofr any socket. We first run some
+ *	Create a filter independent of any socket. We first run some
 *	sanity checks on it to make sure it does not explode on us later.
 *	If an error occurs or there is insufficient memory for the filter
 *	a negative errno code is returned. On success the return is zero.
diff --git a/net/core/neighbour.c b/net/core/neighbour.c
index eb09f8bbbf07..d81d026138f0 100644
--- a/net/core/neighbour.c
+++ b/net/core/neighbour.c
@@ -2219,9 +2219,7 @@ static int neigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
 	rcu_read_lock_bh();
 	nht = rcu_dereference_bh(tbl->nht);
 
-	for (h = 0; h < (1 << nht->hash_shift); h++) {
-		if (h < s_h)
-			continue;
+	for (h = s_h; h < (1 << nht->hash_shift); h++) {
 		if (h > s_h)
 			s_idx = 0;
 		for (n = rcu_dereference_bh(nht->hash_buckets[h]), idx = 0;
@@ -2260,9 +2258,7 @@ static int pneigh_dump_table(struct neigh_table *tbl, struct sk_buff *skb,
 
 	read_lock_bh(&tbl->lock);
 
-	for (h = 0; h <= PNEIGH_HASHMASK; h++) {
-		if (h < s_h)
-			continue;
+	for (h = s_h; h <= PNEIGH_HASHMASK; h++) {
 		if (h > s_h)
 			s_idx = 0;
 		for (n = tbl->phash_buckets[h], idx = 0; n; n = n->next) {
@@ -2297,7 +2293,7 @@ static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
 	struct neigh_table *tbl;
 	int t, family, s_t;
 	int proxy = 0;
-	int err = 0;
+	int err;
 
 	read_lock(&neigh_tbl_lock);
 	family = ((struct rtgenmsg *) nlmsg_data(cb->nlh))->rtgen_family;
@@ -2311,7 +2307,7 @@ static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
 
 	s_t = cb->args[0];
 
-	for (tbl = neigh_tables, t = 0; tbl && (err >= 0);
+	for (tbl = neigh_tables, t = 0; tbl;
 	     tbl = tbl->next, t++) {
 		if (t < s_t || (family && tbl->family != family))
 			continue;
@@ -2322,6 +2318,8 @@ static int neigh_dump_info(struct sk_buff *skb, struct netlink_callback *cb)
 			err = pneigh_dump_table(tbl, skb, cb);
 		else
 			err = neigh_dump_table(tbl, skb, cb);
+		if (err < 0)
+			break;
 	}
 
 	read_unlock(&neigh_tbl_lock);
diff --git a/net/core/skbuff.c b/net/core/skbuff.c
index 016694d62484..d78671e9d545 100644
--- a/net/core/skbuff.c
+++ b/net/core/skbuff.c
@@ -3361,7 +3361,7 @@ EXPORT_SYMBOL(kfree_skb_partial);
 * @to: prior buffer
 * @from: buffer to add
 * @fragstolen: pointer to boolean
- *
+ * @delta_truesize: how much more was allocated than was requested
 */
 bool skb_try_coalesce(struct sk_buff *to, struct sk_buff *from,
 		      bool *fragstolen, int *delta_truesize)
diff --git a/net/ipv4/inetpeer.c b/net/ipv4/inetpeer.c
index d4d61b694fab..dfba343b2509 100644
--- a/net/ipv4/inetpeer.c
+++ b/net/ipv4/inetpeer.c
@@ -560,6 +560,17 @@ bool inet_peer_xrlim_allow(struct inet_peer *peer, int timeout)
 }
 EXPORT_SYMBOL(inet_peer_xrlim_allow);
 
+static void inetpeer_inval_rcu(struct rcu_head *head)
+{
+	struct inet_peer *p = container_of(head, struct inet_peer, gc_rcu);
+
+	spin_lock_bh(&gc_lock);
+	list_add_tail(&p->gc_list, &gc_list);
+	spin_unlock_bh(&gc_lock);
+
+	schedule_delayed_work(&gc_work, gc_delay);
+}
+
 void inetpeer_invalidate_tree(int family)
 {
 	struct inet_peer *old, *new, *prev;
@@ -576,10 +587,7 @@ void inetpeer_invalidate_tree(int family)
 	prev = cmpxchg(&base->root, old, new);
 	if (prev == old) {
 		base->total = 0;
-		spin_lock(&gc_lock);
-		list_add_tail(&prev->gc_list, &gc_list);
-		spin_unlock(&gc_lock);
-		schedule_delayed_work(&gc_work, gc_delay);
+		call_rcu(&prev->gc_rcu, inetpeer_inval_rcu);
 	}
 
 out:
diff --git a/net/ipv4/ip_forward.c b/net/ipv4/ip_forward.c
index e5c44fc586ab..ab09b126423c 100644
--- a/net/ipv4/ip_forward.c
+++ b/net/ipv4/ip_forward.c
@@ -44,6 +44,7 @@ static int ip_forward_finish(struct sk_buff *skb)
 	struct ip_options *opt	= &(IPCB(skb)->opt);
 
 	IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS);
+	IP_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTOCTETS, skb->len);
 
 	if (unlikely(opt->optlen))
 		ip_forward_options(skb);
diff --git a/net/ipv4/ipmr.c b/net/ipv4/ipmr.c
index a9e519ad6db5..c94bbc6f2ba3 100644
--- a/net/ipv4/ipmr.c
+++ b/net/ipv4/ipmr.c
@@ -1574,6 +1574,7 @@ static inline int ipmr_forward_finish(struct sk_buff *skb)
 	struct ip_options *opt = &(IPCB(skb)->opt);
 
 	IP_INC_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTFORWDATAGRAMS);
+	IP_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), IPSTATS_MIB_OUTOCTETS, skb->len);
 
 	if (unlikely(opt->optlen))
 		ip_forward_options(skb);
diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c
index 0c220a416626..74c21b924a79 100644
--- a/net/ipv6/ip6_fib.c
+++ b/net/ipv6/ip6_fib.c
@@ -1561,7 +1561,7 @@ static int fib6_age(struct rt6_info *rt, void *arg)
 			neigh_flags = neigh->flags;
 			neigh_release(neigh);
 		}
-		if (neigh_flags & NTF_ROUTER) {
+		if (!(neigh_flags & NTF_ROUTER)) {
 			RT6_TRACE("purging route %p via non-router but gateway\n",
 				  rt);
 			return -1;
diff --git a/net/ipv6/ip6_output.c b/net/ipv6/ip6_output.c
index 17b8c67998bb..decc21d19c53 100644
--- a/net/ipv6/ip6_output.c
+++ b/net/ipv6/ip6_output.c
@@ -526,6 +526,7 @@ int ip6_forward(struct sk_buff *skb)
 	hdr->hop_limit--;
 
 	IP6_INC_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTFORWDATAGRAMS);
+	IP6_ADD_STATS_BH(net, ip6_dst_idev(dst), IPSTATS_MIB_OUTOCTETS, skb->len);
 	return NF_HOOK(NFPROTO_IPV6, NF_INET_FORWARD, skb, skb->dev,
 		       dst->dev, ip6_forward_finish);
 
diff --git a/net/ipv6/ip6mr.c b/net/ipv6/ip6mr.c
index b15dc08643a4..461e47c8e956 100644
--- a/net/ipv6/ip6mr.c
+++ b/net/ipv6/ip6mr.c
@@ -1886,6 +1886,8 @@ static inline int ip6mr_forward2_finish(struct sk_buff *skb)
 {
 	IP6_INC_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)),
 			 IPSTATS_MIB_OUTFORWDATAGRAMS);
+	IP6_ADD_STATS_BH(dev_net(skb_dst(skb)->dev), ip6_dst_idev(skb_dst(skb)),
+			 IPSTATS_MIB_OUTOCTETS, skb->len);
 	return dst_output(skb);
 }
 
diff --git a/net/l2tp/l2tp_eth.c b/net/l2tp/l2tp_eth.c
index 443591d629ca..185f12f4a5fa 100644
--- a/net/l2tp/l2tp_eth.c
+++ b/net/l2tp/l2tp_eth.c
@@ -162,6 +162,7 @@ static void l2tp_eth_delete(struct l2tp_session *session)
 		if (dev) {
 			unregister_netdev(dev);
 			spriv->dev = NULL;
+			module_put(THIS_MODULE);
 		}
 	}
 }
@@ -249,6 +250,7 @@ static int l2tp_eth_create(struct net *net, u32 tunnel_id, u32 session_id, u32 p
 	if (rc < 0)
 		goto out_del_dev;
 
+	__module_get(THIS_MODULE);
 	/* Must be done after register_netdev() */
 	strlcpy(session->ifname, dev->name, IFNAMSIZ);
 
diff --git a/net/l2tp/l2tp_ip.c b/net/l2tp/l2tp_ip.c
index 70614e7affab..61d8b75d2686 100644
--- a/net/l2tp/l2tp_ip.c
+++ b/net/l2tp/l2tp_ip.c
@@ -464,10 +464,12 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
 					   sk->sk_bound_dev_if);
 		if (IS_ERR(rt))
 			goto no_route;
-		if (connected)
+		if (connected) {
 			sk_setup_caps(sk, &rt->dst);
-		else
-			dst_release(&rt->dst); /* safe since we hold rcu_read_lock */
+		} else {
+			skb_dst_set(skb, &rt->dst);
+			goto xmit;
+		}
 	}
 
 	/* We dont need to clone dst here, it is guaranteed to not disappear.
@@ -475,6 +477,7 @@ static int l2tp_ip_sendmsg(struct kiocb *iocb, struct sock *sk, struct msghdr *m
 	 */
 	skb_dst_set_noref(skb, &rt->dst);
 
+xmit:
 	/* Queue the packet to IP for output */
 	rc = ip_queue_xmit(skb, &inet->cork.fl);
 	rcu_read_unlock();