author    Rusty Russell <rusty@rustcorp.com.au>  2008-11-25 02:35:14 +1030
committer Ingo Molnar <mingo@elte.hu>  2008-11-24 17:52:42 +0100
commit    96f874e26428ab5d2db681c100210c254775e154 (patch)
tree      e18a6f0629ef17f2344f3691c8df4692ccb875fa /include
parent    0e3900e6d3b04c44737ebc505604dcd8ed30e354 (diff)
sched: convert remaining old-style cpumask operators
Impact: Trivial API conversion

    NR_CPUS -> nr_cpu_ids
    cpumask_t -> struct cpumask
    sizeof(cpumask_t) -> cpumask_size()
    cpumask_a = cpumask_b -> cpumask_copy(&cpumask_a, &cpumask_b)
    cpu_set() -> cpumask_set_cpu()
    first_cpu() -> cpumask_first()
    cpumask_of_cpu() -> cpumask_of()
    cpus_* -> cpumask_*

There are some FIXMEs where we need all archs to complete the
infrastructure (patches have been sent):

    cpu_coregroup_map -> cpu_coregroup_mask
    node_to_cpumask* -> cpumask_of_node

There is also one FIXME where we pass an array of cpumasks to
partition_sched_domains(): this implies knowing the definition of
'struct cpumask' and the size of a cpumask. This will be fixed in a
future patch.

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
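As an illustration of the conversions listed above, here is a minimal
before/after sketch; the helper and its local mask are hypothetical and
not part of this patch, only the cpumask operators are taken from the
table:

    /* Old-style operators (pre-conversion), hypothetical helper: */
    static int first_allowed_cpu_old(const cpumask_t *allowed)
    {
            cpumask_t mask;

            mask = *allowed;                /* struct assignment     */
            if (!cpu_isset(0, mask))        /* -> cpumask_test_cpu() */
                    cpu_set(0, mask);       /* -> cpumask_set_cpu()  */
            return first_cpu(mask);         /* -> cpumask_first()    */
    }

    /* Same logic with the new cpumask_* operators: */
    static int first_allowed_cpu_new(const struct cpumask *allowed)
    {
            struct cpumask mask;

            cpumask_copy(&mask, allowed);
            if (!cpumask_test_cpu(0, &mask))
                    cpumask_set_cpu(0, &mask);
            return cpumask_first(&mask);
    }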
Diffstat (limited to 'include')
-rw-r--r--  include/linux/sched.h  16
1 file changed, 8 insertions(+), 8 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 1e33e2cb7f8c..4b7b0187374c 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -879,7 +879,7 @@ static inline struct cpumask *sched_domain_span(struct sched_domain *sd)
return to_cpumask(sd->span);
}
-extern void partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
+extern void partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
struct sched_domain_attr *dattr_new);
extern int arch_reinit_sched_domains(void);
@@ -888,7 +888,7 @@ extern int arch_reinit_sched_domains(void);
struct sched_domain_attr;
static inline void
-partition_sched_domains(int ndoms_new, cpumask_t *doms_new,
+partition_sched_domains(int ndoms_new, struct cpumask *doms_new,
struct sched_domain_attr *dattr_new)
{
}
@@ -970,7 +970,7 @@ struct sched_class {
void (*task_wake_up) (struct rq *this_rq, struct task_struct *task);
void (*set_cpus_allowed)(struct task_struct *p,
- const cpumask_t *newmask);
+ const struct cpumask *newmask);
void (*rq_online)(struct rq *rq);
void (*rq_offline)(struct rq *rq);
@@ -1612,12 +1612,12 @@ extern cputime_t task_gtime(struct task_struct *p);
#ifdef CONFIG_SMP
extern int set_cpus_allowed_ptr(struct task_struct *p,
- const cpumask_t *new_mask);
+ const struct cpumask *new_mask);
#else
static inline int set_cpus_allowed_ptr(struct task_struct *p,
- const cpumask_t *new_mask)
+ const struct cpumask *new_mask)
{
- if (!cpu_isset(0, *new_mask))
+ if (!cpumask_test_cpu(0, new_mask))
return -EINVAL;
return 0;
}
@@ -2230,8 +2230,8 @@ __trace_special(void *__tr, void *__data,
}
#endif
-extern long sched_setaffinity(pid_t pid, const cpumask_t *new_mask);
-extern long sched_getaffinity(pid_t pid, cpumask_t *mask);
+extern long sched_setaffinity(pid_t pid, const struct cpumask *new_mask);
+extern long sched_getaffinity(pid_t pid, struct cpumask *mask);
extern int sched_mc_power_savings, sched_smt_power_savings;
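For context, a hypothetical caller of the converted declarations above;
the pin_task_to_cpu() wrapper is illustrative only, while
set_cpus_allowed_ptr() and cpumask_of() are the real interfaces:

    static int pin_task_to_cpu(struct task_struct *p, int cpu)
    {
            /* cpumask_of() yields a const struct cpumask *, matching
             * the new set_cpus_allowed_ptr() prototype above. */
            return set_cpus_allowed_ptr(p, cpumask_of(cpu));
    }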