author      Ingo Molnar <mingo@elte.hu>    2011-01-14 15:30:16 +0100
committer   Ingo Molnar <mingo@elte.hu>    2011-01-14 15:30:16 +0100
commit      1161ec944916069ceec21c487e30247d9ff22857 (patch)
tree        741385cbfde51286f502790cdfb77b1af2953944
parent      f123c98e7f168e949b283690693695f988332c3d (diff)
parent      b24efdfdf679cf9b05947c531971905fc727dd40 (diff)
download    linux-stable-1161ec944916069ceec21c487e30247d9ff22857.tar.gz
            linux-stable-1161ec944916069ceec21c487e30247d9ff22857.tar.bz2
            linux-stable-1161ec944916069ceec21c487e30247d9ff22857.zip
Merge branch 'rcu/next' of git://git.kernel.org/pub/scm/linux/kernel/git/paulmck/linux-2.6-rcu into core/urgent
-rw-r--r--   init/Kconfig        15
-rw-r--r--   kernel/rcutiny.c     3
-rw-r--r--   kernel/srcu.c       15
3 files changed, 15 insertions, 18 deletions
diff --git a/init/Kconfig b/init/Kconfig
index 8dfd094e6875..bd1ea92349cc 100644
--- a/init/Kconfig
+++ b/init/Kconfig
@@ -497,21 +497,6 @@ config RCU_BOOST_DELAY
Accept the default if unsure.
-config SRCU_SYNCHRONIZE_DELAY
- int "Microseconds to delay before waiting for readers"
- range 0 20
- default 10
- help
- This option controls how long SRCU delays before entering its
- loop waiting on SRCU readers. The purpose of this loop is
- to avoid the unconditional context-switch penalty that would
- otherwise be incurred if there was an active SRCU reader,
- in a manner similar to adaptive locking schemes. This should
- be set to be a bit longer than the common-case SRCU read-side
- critical-section overhead.
-
- Accept the default if unsure.
-
endmenu # "RCU Subsystem"
config IKCONFIG
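
The help text removed above describes an adaptive-locking-style wait: delay for a bit longer than the common-case SRCU read-side critical section, so that in the common case the updater never pays the unconditional context-switch penalty, and fall back to blocking only if a reader is still active after that. A minimal userspace-style sketch of that spin-then-block idea follows; it is an illustration only, with a hypothetical active_readers counter and usleep() standing in for the kernel's udelay() and one-jiffy sleeps.

#include <stdatomic.h>
#include <unistd.h>

/* Hypothetical stand-in for the SRCU per-slot reader count. */
static atomic_int active_readers;

/*
 * Spin-then-block wait, as described in the removed help text: give
 * readers one short delay sized to the common-case critical section,
 * and only if some are still active fall back to longer sleeps.
 */
static void adaptive_wait_for_readers(void)
{
	if (atomic_load(&active_readers))
		usleep(10);		/* fast path: ~10 microseconds */

	while (atomic_load(&active_readers))
		usleep(1000);		/* slow path: ~1 ms per iteration */
}
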
diff --git a/kernel/rcutiny.c b/kernel/rcutiny.c
index 034493724749..0c343b9a46d5 100644
--- a/kernel/rcutiny.c
+++ b/kernel/rcutiny.c
@@ -189,7 +189,8 @@ static int rcu_kthread(void *arg)
unsigned long flags;
for (;;) {
- wait_event(rcu_kthread_wq, have_rcu_kthread_work != 0);
+ wait_event_interruptible(rcu_kthread_wq,
+ have_rcu_kthread_work != 0);
morework = rcu_boost();
local_irq_save(flags);
work = have_rcu_kthread_work;
diff --git a/kernel/srcu.c b/kernel/srcu.c
index 98d8c1e80edb..73ce23feaea9 100644
--- a/kernel/srcu.c
+++ b/kernel/srcu.c
@@ -156,6 +156,16 @@ void __srcu_read_unlock(struct srcu_struct *sp, int idx)
EXPORT_SYMBOL_GPL(__srcu_read_unlock);
/*
+ * We use an adaptive strategy for synchronize_srcu() and especially for
+ * synchronize_srcu_expedited(). We spin for a fixed time period
+ * (defined below) to allow SRCU readers to exit their read-side critical
+ * sections. If there are still some readers after 10 microseconds,
+ * we repeatedly block for 1-millisecond time periods. This approach
+ * has done well in testing, so there is no need for a config parameter.
+ */
+#define SYNCHRONIZE_SRCU_READER_DELAY 10
+
+/*
* Helper function for synchronize_srcu() and synchronize_srcu_expedited().
*/
static void __synchronize_srcu(struct srcu_struct *sp, void (*sync_func)(void))
@@ -207,11 +217,12 @@ static void __synchronize_srcu(struct srcu_struct *sp, void (*sync_func)(void))
* will have finished executing. We initially give readers
* an arbitrarily chosen 10 microseconds to get out of their
* SRCU read-side critical sections, then loop waiting 1/HZ
- * seconds per iteration.
+ * seconds per iteration. The 10-microsecond value has done
+ * very well in testing.
*/
if (srcu_readers_active_idx(sp, idx))
- udelay(CONFIG_SRCU_SYNCHRONIZE_DELAY);
+ udelay(SYNCHRONIZE_SRCU_READER_DELAY);
while (srcu_readers_active_idx(sp, idx))
schedule_timeout_interruptible(1);
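
The two srcu.c hunks above are the wait itself; for context, here is a hedged sketch of the kind of SRCU usage it serves (my_data, my_srcu, global_ptr and the two functions are made-up example names, not part of this patch). Readers bracket their accesses with srcu_read_lock()/srcu_read_unlock(); an updater's call to synchronize_srcu() is what ends up in the udelay()/schedule_timeout_interruptible() loop until every reader that started before the update has finished.

#include <linux/srcu.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>

/* Hypothetical example data; not part of this patch. */
struct my_data {
	int val;
};

static struct srcu_struct my_srcu;		/* init_srcu_struct() at init time */
static struct my_data __rcu *global_ptr;

/* Reader: the critical section the 10-microsecond delay is tuned against. */
static int reader(void)
{
	struct my_data *p;
	int idx, val = -1;

	idx = srcu_read_lock(&my_srcu);
	p = srcu_dereference(global_ptr, &my_srcu);
	if (p)
		val = p->val;
	srcu_read_unlock(&my_srcu, idx);
	return val;
}

/* Updater: synchronize_srcu() performs the spin-then-block wait above. */
static void update(struct my_data *newp)
{
	struct my_data *old;

	old = rcu_dereference_protected(global_ptr, 1);
	rcu_assign_pointer(global_ptr, newp);
	synchronize_srcu(&my_srcu);	/* pre-existing readers have now finished */
	kfree(old);
}

Note that schedule_timeout_interruptible(1) sleeps one jiffy, i.e. 1/HZ seconds, so the new comment's "1-millisecond time periods" assumes HZ=1000; at HZ=100 each blocking iteration is closer to 10 ms.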