author		Linus Torvalds <torvalds@linux-foundation.org>	2009-09-11 13:20:18 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2009-09-11 13:20:18 -0700
commit		eee2775d9924b22643bd89b2e568cc5eed7e8a04 (patch)
tree		095ad7851895c5d39596f3ff7ee1e078235a2501 /include
parent		53e16fbd30005905168d9b75555fdc7e0a2eac58 (diff)
parent		7db905e636f08ea5bc9825c1f73d77802e8ccad5 (diff)
Merge branch 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip
* 'core-rcu-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip: (28 commits)
rcu: Move end of special early-boot RCU operation earlier
rcu: Changes from reviews: avoid casts, fix/add warnings, improve comments
rcu: Create rcutree plugins to handle hotplug CPU for multi-level trees
rcu: Remove lockdep annotations from RCU's _notrace() API members
rcu: Add #ifdef to suppress __rcu_offline_cpu() warning in !HOTPLUG_CPU builds
rcu: Add CPU-offline processing for single-node configurations
rcu: Add "notrace" to RCU function headers used by ftrace
rcu: Remove CONFIG_PREEMPT_RCU
rcu: Merge preemptable-RCU functionality into hierarchical RCU
rcu: Simplify rcu_pending()/rcu_check_callbacks() API
rcu: Use debugfs_remove_recursive() to simplify code.
rcu: Merge per-RCU-flavor initialization into pre-existing macro
rcu: Fix online/offline indication for rcudata.csv trace file
rcu: Consolidate sparse and lockdep declarations in include/linux/rcupdate.h
rcu: Renamings to increase RCU clarity
rcu: Move private definitions from include/linux/rcutree.h to kernel/rcutree.h
rcu: Expunge lingering references to CONFIG_CLASSIC_RCU, optimize on !SMP
rcu: Delay rcu_barrier() wait until beginning of next CPU-hotunplug operation.
rcu: Fix typo in rcu_irq_exit() comment header
rcu: Make rcupreempt_trace.c look at offline CPUs
...
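
The headline change in this series is the removal of CONFIG_CLASSIC_RCU and CONFIG_PREEMPT_RCU in favor of TREE_RCU and the new TREE_PREEMPT_RCU. The RCU API itself is unchanged by this: a minimal reader/updater pair in the style of Documentation/RCU/whatisRCU.txt (struct foo, gbl_foo, and the two accessors below are illustrative names, not code from this merge) builds and behaves the same under either implementation:

#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Illustrative names only; not code from this merge. */
struct foo {
	int a;
};

static struct foo *gbl_foo;
static DEFINE_SPINLOCK(foo_mutex);

int foo_get_a(void)
{
	int retval;

	rcu_read_lock();	/* now an inline function; see the rcupdate.h hunk below */
	retval = rcu_dereference(gbl_foo)->a;
	rcu_read_unlock();
	return retval;
}

void foo_update_a(int new_a)
{
	struct foo *new_fp;
	struct foo *old_fp;

	new_fp = kmalloc(sizeof(*new_fp), GFP_KERNEL);
	if (!new_fp)
		return;
	spin_lock(&foo_mutex);
	old_fp = gbl_foo;
	*new_fp = *old_fp;
	new_fp->a = new_a;
	rcu_assign_pointer(gbl_foo, new_fp);
	spin_unlock(&foo_mutex);
	synchronize_rcu();	/* wait for all pre-existing readers */
	kfree(old_fp);
}

Only the machinery behind rcu_read_lock()/rcu_read_unlock() differs: a bare preempt_disable()/preempt_enable() under TREE_RCU, versus per-task nesting counters that allow readers to be preempted under TREE_PREEMPT_RCU.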
Diffstat (limited to 'include')
-rw-r--r--	include/linux/cpu.h              |  17
-rw-r--r--	include/linux/hardirq.h          |   4
-rw-r--r--	include/linux/init_task.h        |  11
-rw-r--r--	include/linux/pagemap.h          |   4
-rw-r--r--	include/linux/rcuclassic.h       | 178
-rw-r--r--	include/linux/rcupdate.h         |  98
-rw-r--r--	include/linux/rcupreempt.h       | 127
-rw-r--r--	include/linux/rcupreempt_trace.h |  97
-rw-r--r--	include/linux/rcutree.h          | 262
-rw-r--r--	include/linux/sched.h            |  32
10 files changed, 157 insertions, 673 deletions
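
The first hunk below folds the old hotcpu_notifier() registration block into a new cpu_notifier() macro that built-in code can use even when CONFIG_HOTPLUG_CPU=n, which RCU needs for boot-time CPU bringup callouts. A hypothetical built-in user (my_cpu_callback, my_subsys_init, and the messages are made up for illustration) might look like:

#include <linux/cpu.h>
#include <linux/init.h>
#include <linux/kernel.h>
#include <linux/notifier.h>

/* Hypothetical built-in notifier; names and messages are invented. */
static int __cpuinit my_cpu_callback(struct notifier_block *nfb,
				     unsigned long action, void *hcpu)
{
	long cpu = (long)hcpu;

	switch (action) {
	case CPU_UP_PREPARE:
	case CPU_UP_PREPARE_FROZEN:
		pr_info("allocating per-CPU state for cpu %ld\n", cpu);
		break;
	case CPU_DEAD:
	case CPU_DEAD_FROZEN:
		pr_info("freeing per-CPU state of cpu %ld\n", cpu);
		break;
	}
	return NOTIFY_OK;
}

static int __init my_subsys_init(void)
{
	/*
	 * For built-in code this registers the notifier even when
	 * CONFIG_HOTPLUG_CPU=n; for modules without HOTPLUG_CPU and
	 * on !SMP it compiles to "do { (void)(fn); } while (0)".
	 */
	cpu_notifier(my_cpu_callback, 0);
	return 0;
}
early_initcall(my_subsys_init);

Because the macro degenerates to a no-op in modular !HOTPLUG_CPU builds and on !SMP, callers must not depend on the callback for correctness in those configurations.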
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 4d668e05d458..47536197ffdd 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -48,6 +48,15 @@ struct notifier_block;
 #ifdef CONFIG_SMP
 /* Need to know about CPUs going up/down? */
+#if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE)
+#define cpu_notifier(fn, pri) {					\
+	static struct notifier_block fn##_nb __cpuinitdata =	\
+		{ .notifier_call = fn, .priority = pri };	\
+	register_cpu_notifier(&fn##_nb);			\
+}
+#else /* #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */
+#define cpu_notifier(fn, pri)	do { (void)(fn); } while (0)
+#endif /* #else #if defined(CONFIG_HOTPLUG_CPU) || !defined(MODULE) */
 #ifdef CONFIG_HOTPLUG_CPU
 extern int register_cpu_notifier(struct notifier_block *nb);
 extern void unregister_cpu_notifier(struct notifier_block *nb);
@@ -74,6 +83,8 @@ extern void cpu_maps_update_done(void);
 
 #else	/* CONFIG_SMP */
 
+#define cpu_notifier(fn, pri)	do { (void)(fn); } while (0)
+
 static inline int register_cpu_notifier(struct notifier_block *nb)
 {
 	return 0;
@@ -99,11 +110,7 @@ extern struct sysdev_class cpu_sysdev_class;
 extern void get_online_cpus(void);
 extern void put_online_cpus(void);
-#define hotcpu_notifier(fn, pri) {				\
-	static struct notifier_block fn##_nb __cpuinitdata =	\
-		{ .notifier_call = fn, .priority = pri };	\
-	register_cpu_notifier(&fn##_nb);			\
-}
+#define hotcpu_notifier(fn, pri)	cpu_notifier(fn, pri)
 #define register_hotcpu_notifier(nb)	register_cpu_notifier(nb)
 #define unregister_hotcpu_notifier(nb)	unregister_cpu_notifier(nb)
 int cpu_down(unsigned int cpu);
diff --git a/include/linux/hardirq.h b/include/linux/hardirq.h
index 8246c697863d..330cb31bb496 100644
--- a/include/linux/hardirq.h
+++ b/include/linux/hardirq.h
@@ -132,7 +132,7 @@ static inline void account_system_vtime(struct task_struct *tsk)
 }
 #endif
 
-#if defined(CONFIG_NO_HZ) && !defined(CONFIG_CLASSIC_RCU)
+#if defined(CONFIG_NO_HZ)
 extern void rcu_irq_enter(void);
 extern void rcu_irq_exit(void);
 extern void rcu_nmi_enter(void);
@@ -142,7 +142,7 @@ extern void rcu_nmi_exit(void);
 # define rcu_irq_exit() do { } while (0)
 # define rcu_nmi_enter() do { } while (0)
 # define rcu_nmi_exit() do { } while (0)
-#endif /* #if defined(CONFIG_NO_HZ) && !defined(CONFIG_CLASSIC_RCU) */
+#endif /* #if defined(CONFIG_NO_HZ) */
 
 /*
  * It is safe to do non-atomic ops on ->hardirq_context,
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 7fc01b13be43..9e7f2e8fc66e 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -94,6 +94,16 @@ extern struct group_info init_groups;
 # define CAP_INIT_BSET	CAP_INIT_EFF_SET
 #endif
 
+#ifdef CONFIG_TREE_PREEMPT_RCU
+#define INIT_TASK_RCU_PREEMPT(tsk)					\
+	.rcu_read_lock_nesting = 0,					\
+	.rcu_read_unlock_special = 0,					\
+	.rcu_blocked_node = NULL,					\
+	.rcu_node_entry = LIST_HEAD_INIT(tsk.rcu_node_entry),
+#else
+#define INIT_TASK_RCU_PREEMPT(tsk)
+#endif
+
 extern struct cred init_cred;
 
 #ifdef CONFIG_PERF_COUNTERS
@@ -173,6 +183,7 @@ extern struct cred init_cred;
 	INIT_LOCKDEP							\
 	INIT_FTRACE_GRAPH						\
 	INIT_TRACE_RECURSION						\
+	INIT_TASK_RCU_PREEMPT(tsk)					\
 }
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index aec3252afcf5..ed5d7501e181 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -132,7 +132,7 @@ static inline int page_cache_get_speculative(struct page *page)
 {
 	VM_BUG_ON(in_interrupt());
 
-#if !defined(CONFIG_SMP) && defined(CONFIG_CLASSIC_RCU)
+#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
 # ifdef CONFIG_PREEMPT
 	VM_BUG_ON(!in_atomic());
 # endif
@@ -170,7 +170,7 @@ static inline int page_cache_add_speculative(struct page *page, int count)
 {
 	VM_BUG_ON(in_interrupt());
 
-#if !defined(CONFIG_SMP) && defined(CONFIG_CLASSIC_RCU)
+#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
 # ifdef CONFIG_PREEMPT
 	VM_BUG_ON(!in_atomic());
 # endif
diff --git a/include/linux/rcuclassic.h b/include/linux/rcuclassic.h
deleted file mode 100644
index bfd92e1e5d2c..000000000000
--- a/include/linux/rcuclassic.h
+++ /dev/null
@@ -1,178 +0,0 @@
-/*
- * Read-Copy Update mechanism for mutual exclusion (classic version)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * Copyright IBM Corporation, 2001
- *
- * Author: Dipankar Sarma <dipankar@in.ibm.com>
- *
- * Based on the original work by Paul McKenney <paulmck@us.ibm.com>
- * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
- * Papers:
- * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
- * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
- *
- * For detailed explanation of Read-Copy Update mechanism see -
- *		Documentation/RCU
- *
- */
-
-#ifndef __LINUX_RCUCLASSIC_H
-#define __LINUX_RCUCLASSIC_H
-
-#include <linux/cache.h>
-#include <linux/spinlock.h>
-#include <linux/threads.h>
-#include <linux/cpumask.h>
-#include <linux/seqlock.h>
-
-#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
-#define RCU_SECONDS_TILL_STALL_CHECK	(10 * HZ) /* for rcp->jiffies_stall */
-#define RCU_SECONDS_TILL_STALL_RECHECK	(30 * HZ) /* for rcp->jiffies_stall */
-#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
-
-/* Global control variables for rcupdate callback mechanism. */
-struct rcu_ctrlblk {
-	long	cur;		/* Current batch number.              */
-	long	completed;	/* Number of the last completed batch */
-	long	pending;	/* Number of the last pending batch   */
-#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
-	unsigned long gp_start;	/* Time at which GP started in jiffies. */
-	unsigned long jiffies_stall;
-				/* Time at which to check for CPU stalls. */
-#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
-
-	int	signaled;
-
-	spinlock_t	lock	____cacheline_internodealigned_in_smp;
-	DECLARE_BITMAP(cpumask, NR_CPUS); /* CPUs that need to switch for */
-					  /* current batch to proceed.    */
-} ____cacheline_internodealigned_in_smp;
-
-/* Is batch a before batch b ? */
-static inline int rcu_batch_before(long a, long b)
-{
-	return (a - b) < 0;
-}
-
-/* Is batch a after batch b ? */
-static inline int rcu_batch_after(long a, long b)
-{
-	return (a - b) > 0;
-}
-
-/* Per-CPU data for Read-Copy UPdate. */
-struct rcu_data {
-	/* 1) quiescent state handling : */
-	long		quiescbatch;	/* Batch # for grace period */
-	int		passed_quiesc;	/* User-mode/idle loop etc. */
-	int		qs_pending;	/* core waits for quiesc state */
-
-	/* 2) batch handling */
-	/*
-	 * if nxtlist is not NULL, then:
-	 * batch:
-	 *	The batch # for the last entry of nxtlist
-	 * [*nxttail[1], NULL = *nxttail[2]):
-	 *	Entries that batch # <= batch
-	 * [*nxttail[0], *nxttail[1]):
-	 *	Entries that batch # <= batch - 1
-	 * [nxtlist, *nxttail[0]):
-	 *	Entries that batch # <= batch - 2
-	 *	The grace period for these entries has completed, and
-	 *	the other grace-period-completed entries may be moved
-	 *	here temporarily in rcu_process_callbacks().
-	 */
-	long		batch;
-	struct rcu_head *nxtlist;
-	struct rcu_head **nxttail[3];
-	long		qlen;		/* # of queued callbacks */
-	struct rcu_head *donelist;
-	struct rcu_head **donetail;
-	long		blimit;		/* Upper limit on a processed batch */
-	int cpu;
-	struct rcu_head barrier;
-};
-
-/*
- * Increment the quiescent state counter.
- * The counter is a bit degenerated: We do not need to know
- * how many quiescent states passed, just if there was at least
- * one since the start of the grace period. Thus just a flag.
- */
-extern void rcu_qsctr_inc(int cpu);
-extern void rcu_bh_qsctr_inc(int cpu);
-
-extern int rcu_pending(int cpu);
-extern int rcu_needs_cpu(int cpu);
-
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-extern struct lockdep_map rcu_lock_map;
-# define rcu_read_acquire()	\
-			lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
-# define rcu_read_release()	lock_release(&rcu_lock_map, 1, _THIS_IP_)
-#else
-# define rcu_read_acquire()	do { } while (0)
-# define rcu_read_release()	do { } while (0)
-#endif
-
-#define __rcu_read_lock() \
-	do { \
-		preempt_disable(); \
-		__acquire(RCU); \
-		rcu_read_acquire(); \
-	} while (0)
-#define __rcu_read_unlock() \
-	do { \
-		rcu_read_release(); \
-		__release(RCU); \
-		preempt_enable(); \
-	} while (0)
-#define __rcu_read_lock_bh() \
-	do { \
-		local_bh_disable(); \
-		__acquire(RCU_BH); \
-		rcu_read_acquire(); \
-	} while (0)
-#define __rcu_read_unlock_bh() \
-	do { \
-		rcu_read_release(); \
-		__release(RCU_BH); \
-		local_bh_enable(); \
-	} while (0)
-
-#define __synchronize_sched() synchronize_rcu()
-
-#define call_rcu_sched(head, func) call_rcu(head, func)
-
-extern void __rcu_init(void);
-#define rcu_init_sched()	do { } while (0)
-extern void rcu_check_callbacks(int cpu, int user);
-extern void rcu_restart_cpu(int cpu);
-
-extern long rcu_batches_completed(void);
-extern long rcu_batches_completed_bh(void);
-
-#define rcu_enter_nohz()	do { } while (0)
-#define rcu_exit_nohz()		do { } while (0)
-
-/* A context switch is a grace period for rcuclassic. */
-static inline int rcu_blocking_is_gp(void)
-{
-	return num_online_cpus() == 1;
-}
-
-#endif /* __LINUX_RCUCLASSIC_H */
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 15fbb3ca634d..95e0615f4d75 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -51,18 +51,26 @@ struct rcu_head {
 	void (*func)(struct rcu_head *head);
 };
 
-/* Internal to kernel, but needed by rcupreempt.h. */
+/* Exported common interfaces */
+extern void synchronize_rcu(void);
+extern void synchronize_rcu_bh(void);
+extern void rcu_barrier(void);
+extern void rcu_barrier_bh(void);
+extern void rcu_barrier_sched(void);
+extern void synchronize_sched_expedited(void);
+extern int sched_expedited_torture_stats(char *page);
+
+/* Internal to kernel */
+extern void rcu_init(void);
+extern void rcu_scheduler_starting(void);
+extern int rcu_needs_cpu(int cpu);
 extern int rcu_scheduler_active;
 
-#if defined(CONFIG_CLASSIC_RCU)
-#include <linux/rcuclassic.h>
-#elif defined(CONFIG_TREE_RCU)
+#if defined(CONFIG_TREE_RCU) || defined(CONFIG_TREE_PREEMPT_RCU)
 #include <linux/rcutree.h>
-#elif defined(CONFIG_PREEMPT_RCU)
-#include <linux/rcupreempt.h>
 #else
 #error "Unknown RCU implementation specified to kernel configuration"
-#endif /* #else #if defined(CONFIG_CLASSIC_RCU) */
+#endif
 
 #define RCU_HEAD_INIT	{ .next = NULL, .func = NULL }
 #define RCU_HEAD(head) struct rcu_head head = RCU_HEAD_INIT
@@ -70,6 +78,16 @@ extern int rcu_scheduler_active;
 	(ptr)->next = NULL; (ptr)->func = NULL; \
 } while (0)
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+extern struct lockdep_map rcu_lock_map;
+# define rcu_read_acquire()	\
+			lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
+# define rcu_read_release()	lock_release(&rcu_lock_map, 1, _THIS_IP_)
+#else
+# define rcu_read_acquire()	do { } while (0)
+# define rcu_read_release()	do { } while (0)
+#endif
+
 /**
  * rcu_read_lock - mark the beginning of an RCU read-side critical section.
  *
@@ -99,7 +117,12 @@ extern int rcu_scheduler_active;
  *
  * It is illegal to block while in an RCU read-side critical section.
  */
-#define rcu_read_lock() __rcu_read_lock()
+static inline void rcu_read_lock(void)
+{
+	__rcu_read_lock();
+	__acquire(RCU);
+	rcu_read_acquire();
+}
 
 /**
  * rcu_read_unlock - marks the end of an RCU read-side critical section.
@@ -116,7 +139,12 @@ extern int rcu_scheduler_active;
  * used as well. RCU does not care how the writers keep out of each
  * others' way, as long as they do so.
  */
-#define rcu_read_unlock() __rcu_read_unlock()
+static inline void rcu_read_unlock(void)
+{
+	rcu_read_release();
+	__release(RCU);
+	__rcu_read_unlock();
+}
 
 /**
  * rcu_read_lock_bh - mark the beginning of a softirq-only RCU critical section
@@ -129,14 +157,24 @@ extern int rcu_scheduler_active;
  * can use just rcu_read_lock().
  *
  */
-#define rcu_read_lock_bh() __rcu_read_lock_bh()
+static inline void rcu_read_lock_bh(void)
+{
+	__rcu_read_lock_bh();
+	__acquire(RCU_BH);
+	rcu_read_acquire();
+}
 
 /*
  * rcu_read_unlock_bh - marks the end of a softirq-only RCU critical section
 *
 * See rcu_read_lock_bh() for more information.
 */
-#define rcu_read_unlock_bh() __rcu_read_unlock_bh()
+static inline void rcu_read_unlock_bh(void)
+{
+	rcu_read_release();
+	__release(RCU_BH);
+	__rcu_read_unlock_bh();
+}
 
 /**
  * rcu_read_lock_sched - mark the beginning of a RCU-classic critical section
@@ -147,17 +185,34 @@ extern int rcu_scheduler_active;
  * - call_rcu_sched() and rcu_barrier_sched()
  * on the write-side to insure proper synchronization.
  */
-#define rcu_read_lock_sched() preempt_disable()
-#define rcu_read_lock_sched_notrace() preempt_disable_notrace()
+static inline void rcu_read_lock_sched(void)
+{
+	preempt_disable();
+	__acquire(RCU_SCHED);
+	rcu_read_acquire();
+}
+static inline notrace void rcu_read_lock_sched_notrace(void)
+{
+	preempt_disable_notrace();
+	__acquire(RCU_SCHED);
+}
 
 /*
  * rcu_read_unlock_sched - marks the end of a RCU-classic critical section
  *
  * See rcu_read_lock_sched for more information.
  */
-#define rcu_read_unlock_sched() preempt_enable()
-#define rcu_read_unlock_sched_notrace() preempt_enable_notrace()
-
+static inline void rcu_read_unlock_sched(void)
+{
+	rcu_read_release();
+	__release(RCU_SCHED);
+	preempt_enable();
+}
+static inline notrace void rcu_read_unlock_sched_notrace(void)
+{
+	__release(RCU_SCHED);
+	preempt_enable_notrace();
+}
 
 /**
@@ -259,15 +314,4 @@ extern void call_rcu(struct rcu_head *head,
 extern void call_rcu_bh(struct rcu_head *head,
 			void (*func)(struct rcu_head *head));
 
-/* Exported common interfaces */
-extern void synchronize_rcu(void);
-extern void rcu_barrier(void);
-extern void rcu_barrier_bh(void);
-extern void rcu_barrier_sched(void);
-
-/* Internal to kernel */
-extern void rcu_init(void);
-extern void rcu_scheduler_starting(void);
-extern int rcu_needs_cpu(int cpu);
-
 #endif /* __LINUX_RCUPDATE_H */
diff --git a/include/linux/rcupreempt.h b/include/linux/rcupreempt.h
deleted file mode 100644
index fce522782ffa..000000000000
--- a/include/linux/rcupreempt.h
+++ /dev/null
@@ -1,127 +0,0 @@
-/*
- * Read-Copy Update mechanism for mutual exclusion (RT implementation)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * Copyright (C) IBM Corporation, 2006
- *
- * Author:  Paul McKenney <paulmck@us.ibm.com>
- *
- * Based on the original work by Paul McKenney <paul.mckenney@us.ibm.com>
- * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
- * Papers:
- * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
- * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
- *
- * For detailed explanation of Read-Copy Update mechanism see -
- *		Documentation/RCU
- *
- */
-
-#ifndef __LINUX_RCUPREEMPT_H
-#define __LINUX_RCUPREEMPT_H
-
-#include <linux/cache.h>
-#include <linux/spinlock.h>
-#include <linux/threads.h>
-#include <linux/smp.h>
-#include <linux/cpumask.h>
-#include <linux/seqlock.h>
-
-extern void rcu_qsctr_inc(int cpu);
-static inline void rcu_bh_qsctr_inc(int cpu) { }
-
-/*
- * Someone might want to pass call_rcu_bh as a function pointer.
- * So this needs to just be a rename and not a macro function.
- *  (no parentheses)
- */
-#define call_rcu_bh call_rcu
-
-/**
- * call_rcu_sched - Queue RCU callback for invocation after sched grace period.
- * @head: structure to be used for queueing the RCU updates.
- * @func: actual update function to be invoked after the grace period
- *
- * The update function will be invoked some time after a full
- * synchronize_sched()-style grace period elapses, in other words after
- * all currently executing preempt-disabled sections of code (including
- * hardirq handlers, NMI handlers, and local_irq_save() blocks) have
- * completed.
- */
-extern void call_rcu_sched(struct rcu_head *head,
-			   void (*func)(struct rcu_head *head));
-
-extern void __rcu_read_lock(void)	__acquires(RCU);
-extern void __rcu_read_unlock(void)	__releases(RCU);
-extern int rcu_pending(int cpu);
-extern int rcu_needs_cpu(int cpu);
-
-#define __rcu_read_lock_bh()	{ rcu_read_lock(); local_bh_disable(); }
-#define __rcu_read_unlock_bh()	{ local_bh_enable(); rcu_read_unlock(); }
-
-extern void __synchronize_sched(void);
-
-extern void __rcu_init(void);
-extern void rcu_init_sched(void);
-extern void rcu_check_callbacks(int cpu, int user);
-extern void rcu_restart_cpu(int cpu);
-extern long rcu_batches_completed(void);
-
-/*
- * Return the number of RCU batches processed thus far. Useful for debug
- * and statistic. The _bh variant is identifcal to straight RCU
- */
-static inline long rcu_batches_completed_bh(void)
-{
-	return rcu_batches_completed();
-}
-
-#ifdef CONFIG_RCU_TRACE
-struct rcupreempt_trace;
-extern long *rcupreempt_flipctr(int cpu);
-extern long rcupreempt_data_completed(void);
-extern int rcupreempt_flip_flag(int cpu);
-extern int rcupreempt_mb_flag(int cpu);
-extern char *rcupreempt_try_flip_state_name(void);
-extern struct rcupreempt_trace *rcupreempt_trace_cpu(int cpu);
-#endif
-
-struct softirq_action;
-
-#ifdef CONFIG_NO_HZ
-extern void rcu_enter_nohz(void);
-extern void rcu_exit_nohz(void);
-#else
-# define rcu_enter_nohz()	do { } while (0)
-# define rcu_exit_nohz()	do { } while (0)
-#endif
-
-/*
- * A context switch is a grace period for rcupreempt synchronize_rcu()
- * only during early boot, before the scheduler has been initialized.
- * So, how the heck do we get a context switch?  Well, if the caller
- * invokes synchronize_rcu(), they are willing to accept a context
- * switch, so we simply pretend that one happened.
- *
- * After boot, there might be a blocked or preempted task in an RCU
- * read-side critical section, so we cannot then take the fastpath.
- */
-static inline int rcu_blocking_is_gp(void)
-{
-	return num_online_cpus() == 1 && !rcu_scheduler_active;
-}
-
-#endif /* __LINUX_RCUPREEMPT_H */
diff --git a/include/linux/rcupreempt_trace.h b/include/linux/rcupreempt_trace.h
deleted file mode 100644
index b99ae073192a..000000000000
--- a/include/linux/rcupreempt_trace.h
+++ /dev/null
@@ -1,97 +0,0 @@
-/*
- * Read-Copy Update mechanism for mutual exclusion (RT implementation)
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
- *
- * Copyright (C) IBM Corporation, 2006
- *
- * Author:  Paul McKenney <paulmck@us.ibm.com>
- *
- * Based on the original work by Paul McKenney <paul.mckenney@us.ibm.com>
- * and inputs from Rusty Russell, Andrea Arcangeli and Andi Kleen.
- * Papers:
- * http://www.rdrop.com/users/paulmck/paper/rclockpdcsproof.pdf
- * http://lse.sourceforge.net/locking/rclock_OLS.2001.05.01c.sc.pdf (OLS2001)
- *
- * For detailed explanation of the Preemptible Read-Copy Update mechanism see -
- *		http://lwn.net/Articles/253651/
- */
-
-#ifndef __LINUX_RCUPREEMPT_TRACE_H
-#define __LINUX_RCUPREEMPT_TRACE_H
-
-#include <linux/types.h>
-#include <linux/kernel.h>
-
-#include <asm/atomic.h>
-
-/*
- * PREEMPT_RCU data structures.
- */
-
-struct rcupreempt_trace {
-	long		next_length;
-	long		next_add;
-	long		wait_length;
-	long		wait_add;
-	long		done_length;
-	long		done_add;
-	long		done_remove;
-	atomic_t	done_invoked;
-	long		rcu_check_callbacks;
-	atomic_t	rcu_try_flip_1;
-	atomic_t	rcu_try_flip_e1;
-	long		rcu_try_flip_i1;
-	long		rcu_try_flip_ie1;
-	long		rcu_try_flip_g1;
-	long		rcu_try_flip_a1;
-	long		rcu_try_flip_ae1;
-	long		rcu_try_flip_a2;
-	long		rcu_try_flip_z1;
-	long		rcu_try_flip_ze1;
-	long		rcu_try_flip_z2;
-	long		rcu_try_flip_m1;
-	long		rcu_try_flip_me1;
-	long		rcu_try_flip_m2;
-};
-
-#ifdef CONFIG_RCU_TRACE
-#define RCU_TRACE(fn, arg)	fn(arg);
-#else
-#define RCU_TRACE(fn, arg)
-#endif
-
-extern void rcupreempt_trace_move2done(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_move2wait(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_1(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_e1(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_i1(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_ie1(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_g1(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_a1(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_ae1(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_a2(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_z1(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_ze1(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_z2(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_m1(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_me1(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_try_flip_m2(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_check_callbacks(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_done_remove(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_invoke(struct rcupreempt_trace *trace);
-extern void rcupreempt_trace_next_add(struct rcupreempt_trace *trace);
-
-#endif /* __LINUX_RCUPREEMPT_TRACE_H */
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 5a5153806c42..a89307717825 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -30,264 +30,57 @@
 #ifndef __LINUX_RCUTREE_H
 #define __LINUX_RCUTREE_H
 
-#include <linux/cache.h>
-#include <linux/spinlock.h>
-#include <linux/threads.h>
-#include <linux/cpumask.h>
-#include <linux/seqlock.h>
+extern void rcu_sched_qs(int cpu);
+extern void rcu_bh_qs(int cpu);
 
-/*
- * Define shape of hierarchy based on NR_CPUS and CONFIG_RCU_FANOUT.
- * In theory, it should be possible to add more levels straightforwardly.
- * In practice, this has not been tested, so there is probably some
- * bug somewhere.
- */
-#define MAX_RCU_LVLS 3
-#define RCU_FANOUT	      (CONFIG_RCU_FANOUT)
-#define RCU_FANOUT_SQ	      (RCU_FANOUT * RCU_FANOUT)
-#define RCU_FANOUT_CUBE	      (RCU_FANOUT_SQ * RCU_FANOUT)
-
-#if NR_CPUS <= RCU_FANOUT
-# define NUM_RCU_LVLS	      1
-# define NUM_RCU_LVL_0	      1
-# define NUM_RCU_LVL_1	      (NR_CPUS)
-# define NUM_RCU_LVL_2	      0
-# define NUM_RCU_LVL_3	      0
-#elif NR_CPUS <= RCU_FANOUT_SQ
-# define NUM_RCU_LVLS	      2
-# define NUM_RCU_LVL_0	      1
-# define NUM_RCU_LVL_1	      (((NR_CPUS) + RCU_FANOUT - 1) / RCU_FANOUT)
-# define NUM_RCU_LVL_2	      (NR_CPUS)
-# define NUM_RCU_LVL_3	      0
-#elif NR_CPUS <= RCU_FANOUT_CUBE
-# define NUM_RCU_LVLS	      3
-# define NUM_RCU_LVL_0	      1
-# define NUM_RCU_LVL_1	      (((NR_CPUS) + RCU_FANOUT_SQ - 1) / RCU_FANOUT_SQ)
-# define NUM_RCU_LVL_2	      (((NR_CPUS) + (RCU_FANOUT) - 1) / (RCU_FANOUT))
-# define NUM_RCU_LVL_3	      NR_CPUS
-#else
-# error "CONFIG_RCU_FANOUT insufficient for NR_CPUS"
-#endif /* #if (NR_CPUS) <= RCU_FANOUT */
-
-#define RCU_SUM (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3)
-#define NUM_RCU_NODES (RCU_SUM - NR_CPUS)
-
-/*
- * Dynticks per-CPU state.
- */
-struct rcu_dynticks {
-	int dynticks_nesting;	/* Track nesting level, sort of. */
-	int dynticks;		/* Even value for dynticks-idle, else odd. */
-	int dynticks_nmi;	/* Even value for either dynticks-idle or */
-				/*  not in nmi handler, else odd.  So this */
-				/*  remains even for nmi from irq handler. */
-};
-
-/*
- * Definition for node within the RCU grace-period-detection hierarchy.
- */
-struct rcu_node {
-	spinlock_t lock;
-	unsigned long qsmask;	/* CPUs or groups that need to switch in */
-				/*  order for current grace period to proceed.*/
-	unsigned long qsmaskinit;
-				/* Per-GP initialization for qsmask. */
-	unsigned long grpmask;	/* Mask to apply to parent qsmask. */
-	int	grplo;		/* lowest-numbered CPU or group here. */
-	int	grphi;		/* highest-numbered CPU or group here. */
-	u8	grpnum;		/* CPU/group number for next level up. */
-	u8	level;		/* root is at level 0. */
-	struct rcu_node *parent;
-} ____cacheline_internodealigned_in_smp;
-
-/* Index values for nxttail array in struct rcu_data. */
-#define RCU_DONE_TAIL		0	/* Also RCU_WAIT head. */
-#define RCU_WAIT_TAIL		1	/* Also RCU_NEXT_READY head. */
-#define RCU_NEXT_READY_TAIL	2	/* Also RCU_NEXT head. */
-#define RCU_NEXT_TAIL		3
-#define RCU_NEXT_SIZE		4
-
-/* Per-CPU data for read-copy update. */
-struct rcu_data {
-	/* 1) quiescent-state and grace-period handling : */
-	long		completed;	/* Track rsp->completed gp number */
-					/*  in order to detect GP end. */
-	long		gpnum;		/* Highest gp number that this CPU */
-					/*  is aware of having started. */
-	long		passed_quiesc_completed;
-					/* Value of completed at time of qs. */
-	bool		passed_quiesc;	/* User-mode/idle loop etc. */
-	bool		qs_pending;	/* Core waits for quiesc state. */
-	bool		beenonline;	/* CPU online at least once. */
-	struct rcu_node *mynode;	/* This CPU's leaf of hierarchy */
-	unsigned long	grpmask;	/* Mask to apply to leaf qsmask. */
-
-	/* 2) batch handling */
-	/*
-	 * If nxtlist is not NULL, it is partitioned as follows.
-	 * Any of the partitions might be empty, in which case the
-	 * pointer to that partition will be equal to the pointer for
-	 * the following partition.  When the list is empty, all of
-	 * the nxttail elements point to nxtlist, which is NULL.
-	 *
-	 * [*nxttail[RCU_NEXT_READY_TAIL], NULL = *nxttail[RCU_NEXT_TAIL]):
-	 *	Entries that might have arrived after current GP ended
-	 * [*nxttail[RCU_WAIT_TAIL], *nxttail[RCU_NEXT_READY_TAIL]):
-	 *	Entries known to have arrived before current GP ended
-	 * [*nxttail[RCU_DONE_TAIL], *nxttail[RCU_WAIT_TAIL]):
-	 *	Entries that batch # <= ->completed - 1: waiting for current GP
-	 * [nxtlist, *nxttail[RCU_DONE_TAIL]):
-	 *	Entries that batch # <= ->completed
-	 *	The grace period for these entries has completed, and
-	 *	the other grace-period-completed entries may be moved
-	 *	here temporarily in rcu_process_callbacks().
-	 */
-	struct rcu_head *nxtlist;
-	struct rcu_head **nxttail[RCU_NEXT_SIZE];
-	long		qlen;		/* # of queued callbacks */
-	long		blimit;		/* Upper limit on a processed batch */
-
-#ifdef CONFIG_NO_HZ
-	/* 3) dynticks interface. */
-	struct rcu_dynticks *dynticks;	/* Shared per-CPU dynticks state. */
-	int dynticks_snap;		/* Per-GP tracking for dynticks. */
-	int dynticks_nmi_snap;		/* Per-GP tracking for dynticks_nmi. */
-#endif /* #ifdef CONFIG_NO_HZ */
-
-	/* 4) reasons this CPU needed to be kicked by force_quiescent_state */
-#ifdef CONFIG_NO_HZ
-	unsigned long dynticks_fqs;	/* Kicked due to dynticks idle. */
-#endif /* #ifdef CONFIG_NO_HZ */
-	unsigned long offline_fqs;	/* Kicked due to being offline. */
-	unsigned long resched_ipi;	/* Sent a resched IPI. */
-
-	/* 5) __rcu_pending() statistics. */
-	long n_rcu_pending;		/* rcu_pending() calls since boot. */
-	long n_rp_qs_pending;
-	long n_rp_cb_ready;
-	long n_rp_cpu_needs_gp;
-	long n_rp_gp_completed;
-	long n_rp_gp_started;
-	long n_rp_need_fqs;
-	long n_rp_need_nothing;
-
-	int cpu;
-};
-
-/* Values for signaled field in struct rcu_state. */
-#define RCU_GP_INIT		0	/* Grace period being initialized. */
-#define RCU_SAVE_DYNTICK	1	/* Need to scan dyntick state. */
-#define RCU_FORCE_QS		2	/* Need to force quiescent state. */
-#ifdef CONFIG_NO_HZ
-#define RCU_SIGNAL_INIT		RCU_SAVE_DYNTICK
-#else /* #ifdef CONFIG_NO_HZ */
-#define RCU_SIGNAL_INIT		RCU_FORCE_QS
-#endif /* #else #ifdef CONFIG_NO_HZ */
-
-#define RCU_JIFFIES_TILL_FORCE_QS	 3	/* for rsp->jiffies_force_qs */
-#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
-#define RCU_SECONDS_TILL_STALL_CHECK	(10 * HZ)  /* for rsp->jiffies_stall */
-#define RCU_SECONDS_TILL_STALL_RECHECK	(30 * HZ)  /* for rsp->jiffies_stall */
-#define RCU_STALL_RAT_DELAY		2	/* Allow other CPUs time */
-						/*  to take at least one */
-						/*  scheduling clock irq */
-						/*  before ratting on them. */
-
-#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
-
-/*
- * RCU global state, including node hierarchy.  This hierarchy is
- * represented in "heap" form in a dense array.  The root (first level)
- * of the hierarchy is in ->node[0] (referenced by ->level[0]), the second
- * level in ->node[1] through ->node[m] (->node[1] referenced by ->level[1]),
- * and the third level in ->node[m+1] and following (->node[m+1] referenced
- * by ->level[2]).  The number of levels is determined by the number of
- * CPUs and by CONFIG_RCU_FANOUT.  Small systems will have a "hierarchy"
- * consisting of a single rcu_node.
- */
-struct rcu_state {
-	struct rcu_node node[NUM_RCU_NODES];	/* Hierarchy. */
-	struct rcu_node *level[NUM_RCU_LVLS];	/* Hierarchy levels. */
-	u32 levelcnt[MAX_RCU_LVLS + 1];		/* # nodes in each level. */
-	u8 levelspread[NUM_RCU_LVLS];		/* kids/node in each level. */
-	struct rcu_data *rda[NR_CPUS];		/* array of rdp pointers. */
-
-	/* The following fields are guarded by the root rcu_node's lock. */
-
-	u8	signaled ____cacheline_internodealigned_in_smp;
-						/* Force QS state. */
-	long	gpnum;				/* Current gp number. */
-	long	completed;			/* # of last completed gp. */
-	spinlock_t onofflock;			/* exclude on/offline and */
-						/*  starting new GP. */
-	spinlock_t fqslock;			/* Only one task forcing */
-						/*  quiescent states. */
-	unsigned long jiffies_force_qs;		/* Time at which to invoke */
-						/*  force_quiescent_state(). */
-	unsigned long n_force_qs;		/* Number of calls to */
-						/*  force_quiescent_state(). */
-	unsigned long n_force_qs_lh;		/* ~Number of calls leaving */
-						/*  due to lock unavailable. */
-	unsigned long n_force_qs_ngp;		/* Number of calls leaving */
-						/*  due to no GP active. */
-#ifdef CONFIG_RCU_CPU_STALL_DETECTOR
-	unsigned long gp_start;			/* Time at which GP started, */
-						/*  but in jiffies. */
-	unsigned long jiffies_stall;		/* Time at which to check */
-						/*  for CPU stalls. */
-#endif /* #ifdef CONFIG_RCU_CPU_STALL_DETECTOR */
-#ifdef CONFIG_NO_HZ
-	long dynticks_completed;		/* Value of completed @ snap. */
-#endif /* #ifdef CONFIG_NO_HZ */
-};
+extern int rcu_needs_cpu(int cpu);
 
-extern void rcu_qsctr_inc(int cpu);
-extern void rcu_bh_qsctr_inc(int cpu);
+#ifdef CONFIG_TREE_PREEMPT_RCU
 
-extern int rcu_pending(int cpu);
-extern int rcu_needs_cpu(int cpu);
+extern void __rcu_read_lock(void);
+extern void __rcu_read_unlock(void);
+extern void exit_rcu(void);
 
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
-extern struct lockdep_map rcu_lock_map;
-# define rcu_read_acquire()	\
-			lock_acquire(&rcu_lock_map, 0, 0, 2, 1, NULL, _THIS_IP_)
-# define rcu_read_release()	lock_release(&rcu_lock_map, 1, _THIS_IP_)
-#else
-# define rcu_read_acquire()	do { } while (0)
-# define rcu_read_release()	do { } while (0)
-#endif
+#else /* #ifdef CONFIG_TREE_PREEMPT_RCU */
 
 static inline void __rcu_read_lock(void)
 {
 	preempt_disable();
-	__acquire(RCU);
-	rcu_read_acquire();
 }
+
 static inline void __rcu_read_unlock(void)
 {
-	rcu_read_release();
-	__release(RCU);
 	preempt_enable();
 }
+
+static inline void exit_rcu(void)
+{
+}
+
+#endif /* #else #ifdef CONFIG_TREE_PREEMPT_RCU */
+
 static inline void __rcu_read_lock_bh(void)
 {
 	local_bh_disable();
-	__acquire(RCU_BH);
-	rcu_read_acquire();
 }
 static inline void __rcu_read_unlock_bh(void)
 {
-	rcu_read_release();
-	__release(RCU_BH);
 	local_bh_enable();
 }
 
 #define __synchronize_sched() synchronize_rcu()
 
-#define call_rcu_sched(head, func) call_rcu(head, func)
+extern void call_rcu_sched(struct rcu_head *head,
+			   void (*func)(struct rcu_head *rcu));
 
-static inline void rcu_init_sched(void)
+static inline void synchronize_rcu_expedited(void)
 {
+	synchronize_sched_expedited();
+}
+
+static inline void synchronize_rcu_bh_expedited(void)
+{
+	synchronize_sched_expedited();
 }
 
 extern void __rcu_init(void);
@@ -296,6 +89,11 @@ extern void rcu_restart_cpu(int cpu);
 
 extern long rcu_batches_completed(void);
 extern long rcu_batches_completed_bh(void);
+extern long rcu_batches_completed_sched(void);
+
+static inline void rcu_init_sched(void)
+{
+}
 
 #ifdef CONFIG_NO_HZ
 void rcu_enter_nohz(void);
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 9304027673b0..379531c08975 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1163,6 +1163,8 @@ struct sched_rt_entity {
 #endif
 };
 
+struct rcu_node;
+
 struct task_struct {
 	volatile long state;	/* -1 unrunnable, 0 runnable, >0 stopped */
 	void *stack;
@@ -1206,10 +1208,12 @@ struct task_struct {
 	unsigned int policy;
 	cpumask_t cpus_allowed;
 
-#ifdef CONFIG_PREEMPT_RCU
+#ifdef CONFIG_TREE_PREEMPT_RCU
 	int rcu_read_lock_nesting;
-	int rcu_flipctr_idx;
-#endif /* #ifdef CONFIG_PREEMPT_RCU */
+	char rcu_read_unlock_special;
+	struct rcu_node *rcu_blocked_node;
+	struct list_head rcu_node_entry;
+#endif /* #ifdef CONFIG_TREE_PREEMPT_RCU */
 
 #if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_TASK_DELAY_ACCT)
 	struct sched_info sched_info;
@@ -1725,6 +1729,28 @@ extern cputime_t task_gtime(struct task_struct *p);
 #define tsk_used_math(p) ((p)->flags & PF_USED_MATH)
 #define used_math() tsk_used_math(current)
 
+#ifdef CONFIG_TREE_PREEMPT_RCU
+
+#define RCU_READ_UNLOCK_BLOCKED (1 << 0) /* blocked while in RCU read-side. */
+#define RCU_READ_UNLOCK_NEED_QS (1 << 1) /* RCU core needs CPU response. */
+#define RCU_READ_UNLOCK_GOT_QS  (1 << 2) /* CPU has responded to RCU core. */
+
+static inline void rcu_copy_process(struct task_struct *p)
+{
+	p->rcu_read_lock_nesting = 0;
+	p->rcu_read_unlock_special = 0;
+	p->rcu_blocked_node = NULL;
+	INIT_LIST_HEAD(&p->rcu_node_entry);
+}
+
+#else
+
+static inline void rcu_copy_process(struct task_struct *p)
+{
+}
+
+#endif
+
 #ifdef CONFIG_SMP
 extern int set_cpus_allowed_ptr(struct task_struct *p,
 				const struct cpumask *new_mask);
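
Also new in the rcutree.h hunk above is the expedited grace-period API: synchronize_rcu_expedited() and synchronize_rcu_bh_expedited(), both currently mapped onto synchronize_sched_expedited(). A sketch of the intended usage pattern (struct foo, foo_list, foo_lock, and remove_foo_entry() are hypothetical names, not from this merge):

#include <linux/rculist.h>
#include <linux/rcupdate.h>
#include <linux/slab.h>
#include <linux/spinlock.h>

/* Hypothetical example; identifiers below are not from this commit. */
struct foo {
	struct list_head list;
	int key;
};

static LIST_HEAD(foo_list);
static DEFINE_SPINLOCK(foo_lock);

static void remove_foo_entry(struct foo *fp)
{
	spin_lock(&foo_lock);
	list_del_rcu(&fp->list);	/* unlink; readers may still hold fp */
	spin_unlock(&foo_lock);

	/*
	 * Expedited grace periods complete much sooner than
	 * synchronize_rcu() but burn more CPU doing so.
	 */
	synchronize_rcu_expedited();
	kfree(fp);			/* no reader can still see fp */
}

The expedited variants trade CPU overhead for grace-period latency, so they are meant for rare, latency-critical update paths; common-case code should stay with synchronize_rcu() or call_rcu().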