author     Rusty Russell <rusty@rustcorp.com.au>    2008-11-25 02:35:04 +1030
committer  Ingo Molnar <mingo@elte.hu>              2008-11-24 17:50:45 +0100
commit     758b2cdc6f6a22c702bd8f2344382fb1270b2161 (patch)
tree       270aec3d0f6235c1519c16e8dc8148f195e133db /include/linux/sched.h
parent     1e5ce4f4a755ee498bd9217dae26143afa0d8f31 (diff)
sched: wrap sched_group and sched_domain cpumask accesses.
Impact: trivial wrap of member accesses

This eases the transition in the next patch. We also get rid of a
temporary cpumask in find_idlest_cpu() thanks to for_each_cpu_and, and
one in sched_balance_self() by taking the weight before setting sd to
NULL.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
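To make the wrapped-access pattern concrete, here is a hedged sketch (not taken from the patch itself) of the kind of caller change the new sched_group_cpus() accessor and for_each_cpu_and enable; the names group, tmp, i and the p->cpus_allowed field are used for illustration only:

	/* Before: direct member access plus a temporary cpumask. */
	cpumask_t tmp;
	int i;

	cpus_and(tmp, group->cpumask, p->cpus_allowed);
	for_each_cpu_mask(i, tmp) {
		/* ... evaluate load on CPU i ... */
	}

	/* After: accessor plus for_each_cpu_and, no temporary mask needed. */
	int i;

	for_each_cpu_and(i, sched_group_cpus(group), &p->cpus_allowed) {
		/* ... evaluate load on CPU i ... */
	}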
Diffstat (limited to 'include/linux/sched.h')
-rw-r--r--  include/linux/sched.h | 10
1 file changed, 10 insertions, 0 deletions
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4ce5c603c51a..2b95aa9f779b 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -786,6 +786,11 @@ struct sched_group {
 	u32 reciprocal_cpu_power;
 };
 
+static inline struct cpumask *sched_group_cpus(struct sched_group *sg)
+{
+	return &sg->cpumask;
+}
+
 enum sched_domain_level {
 	SD_LV_NONE = 0,
 	SD_LV_SIBLING,
@@ -866,6 +871,11 @@ struct sched_domain {
 #endif
 };
 
+static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
+{
+	return &sd->span;
+}
+
 extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
 				    struct sched_domain_attr *dattr_new);
 extern int arch_reinit_sched_domains(void);
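For context, a hedged sketch of how the two accessors might be used together; the weight handling echoes the sched_balance_self() note in the commit message, but the surrounding code is illustrative rather than lifted from the kernel:

	struct sched_group *group;	/* assumed: some group within sd */
	int cpu, weight;

	/* Take the span's weight before sd may later be set to NULL. */
	weight = cpumask_weight(sched_domain_span(sd));

	/* Walk the CPUs covered by one of the domain's groups. */
	for_each_cpu(cpu, sched_group_cpus(group)) {
		/* ... per-CPU load inspection ... */
	}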