author     Paul E. McKenney <paul.mckenney@linaro.org>    2012-07-23 16:03:51 -0700
committer  Paul E. McKenney <paulmck@linux.vnet.ibm.com>  2012-09-23 07:41:56 -0700
commit     bcfa57ce10d3d53d37a6e324f3010b1ce6a2784a (patch)
tree       1d365a7a64e3ea1d1dc743afcc1de4b6935caa57
parent     25d30cf4250f74e5ceb35f8f39739782408db633 (diff)
rcu: Eliminate signed overflow in synchronize_rcu_expedited()
In the C language, signed overflow is undefined.  It is true that
two's-complement arithmetic normally comes to the rescue, but the
compiler can subvert this any time it has information about the values
being compared.  For example, given "if (a - b > 0)", if the compiler
has enough information to realize that (for example) the value of "a"
is positive and that of "b" is negative, the compiler is within its
rights to optimize this to a simple "if (1)", which might not be what
you want.  This commit therefore converts synchronize_rcu_expedited()'s
work-done detection counter from signed to unsigned.
Signed-off-by: Paul E. McKenney <paul.mckenney@linaro.org>
Signed-off-by: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Reviewed-by: Josh Triplett <josh@joshtriplett.org>
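
As an illustrative aside, below is a minimal standalone C sketch of the wrap-safe comparison that the patch switches to. The helper ulong_cmp_lt() is a hypothetical stand-in mirroring the kernel's ULONG_CMP_LT() macro (roughly "ULONG_MAX / 2 < (a) - (b)"): unsigned subtraction is defined modulo ULONG_MAX + 1, so the "others did our work for us" check stays correct even after the counter wraps, with no signed overflow for the compiler to exploit.

#include <limits.h>
#include <stdio.h>

/*
 * Hypothetical stand-in for the kernel's ULONG_CMP_LT(a, b): nonzero
 * when "a" is before "b" in the modular sense.  The unsigned
 * subtraction wraps, so a counter that has rolled over past ULONG_MAX
 * is still ordered correctly relative to an earlier snapshot.
 */
static int ulong_cmp_lt(unsigned long a, unsigned long b)
{
	return ULONG_MAX / 2 < a - b;
}

int main(void)
{
	/* Snapshot taken just before the counter wraps... */
	unsigned long snap = ULONG_MAX;
	/* ...and the counter has since been incremented, wrapping to 0. */
	unsigned long count = snap + 1;

	/*
	 * The old test subtracted two signed longs and checked "> 0";
	 * that subtraction can overflow, which is undefined behavior the
	 * compiler may exploit.  The unsigned helper still detects that
	 * another task completed the expedited grace period for us.
	 */
	printf("others did our work: %d\n", ulong_cmp_lt(snap, count));
	return 0;
}

Compiled as-is, this prints 1: the wrapped counter is still seen as having advanced past the snapshot.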
 kernel/rcutree_plugin.h | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)
diff --git a/kernel/rcutree_plugin.h b/kernel/rcutree_plugin.h
index eb8dcd1bc4b5..cb5879386a02 100644
--- a/kernel/rcutree_plugin.h
+++ b/kernel/rcutree_plugin.h
@@ -677,7 +677,7 @@ void synchronize_rcu(void)
 EXPORT_SYMBOL_GPL(synchronize_rcu);
 
 static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
-static long sync_rcu_preempt_exp_count;
+static unsigned long sync_rcu_preempt_exp_count;
 static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);
 
 /*
@@ -792,7 +792,7 @@ void synchronize_rcu_expedited(void)
 	unsigned long flags;
 	struct rcu_node *rnp;
 	struct rcu_state *rsp = &rcu_preempt_state;
-	long snap;
+	unsigned long snap;
 	int trycount = 0;
 
 	smp_mb(); /* Caller's modifications seen first by other CPUs. */
@@ -811,10 +811,10 @@ void synchronize_rcu_expedited(void)
 			synchronize_rcu();
 			return;
 		}
-		if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
+		if (ULONG_CMP_LT(snap, ACCESS_ONCE(sync_rcu_preempt_exp_count)))
 			goto mb_ret; /* Others did our work for us. */
 	}
-	if ((ACCESS_ONCE(sync_rcu_preempt_exp_count) - snap) > 0)
+	if (ULONG_CMP_LT(snap, ACCESS_ONCE(sync_rcu_preempt_exp_count)))
 		goto unlock_mb_ret; /* Others did our work for us. */
 
 	/* force all RCU readers onto ->blkd_tasks lists. */