author    Rusty Russell <rusty@rustcorp.com.au>  2008-11-25 02:35:11 +1030
committer Ingo Molnar <mingo@elte.hu>            2008-11-24 17:51:59 +0100
commit    5a16f3d30ca4e3f166d691220c003066a14e32b5 (patch)
tree      8e65fa8dc91f12a6be37c36d71bb48d2c97d6447
parent    e76bd8d9850c2296a7e8e24c9dce9b5e6b55fe2f (diff)
sched: convert (sys_)sched_setaffinity() to cpumask_var_t.
Impact: stack usage reduction

Dynamically allocating cpumasks (when CONFIG_CPUMASK_OFFSTACK) saves
space on the stack. cpumask_var_t is just a struct cpumask for
!CONFIG_CPUMASK_OFFSTACK.

Note the removal of the initializer of new_mask: since the first thing
we did was "cpus_and(new_mask, new_mask, cpus_allowed)" I just changed
that to "cpumask_and(new_mask, in_mask, cpus_allowed);".

Signed-off-by: Rusty Russell <rusty@rustcorp.com.au>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
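For reference, the pattern this conversion follows is: allocate the mask with
alloc_cpumask_var() (which only touches the heap when CONFIG_CPUMASK_OFFSTACK=y),
operate on it through the cpumask_* accessors, and release it with
free_cpumask_var(). A minimal sketch of that shape, using the same kernel APIs
as the diff below; the wrapper function itself is hypothetical:

#include <linux/cpumask.h>
#include <linux/gfp.h>

/* Hypothetical helper: intersect a caller-supplied mask with an
 * allowed set, using the alloc/use/free cpumask_var_t pattern. */
static int example_restrict(const struct cpumask *in_mask,
                            const struct cpumask *allowed)
{
        cpumask_var_t tmp;

        /* Heap allocation only when CONFIG_CPUMASK_OFFSTACK=y;
         * otherwise tmp is an ordinary struct cpumask on the stack
         * and this call always succeeds. */
        if (!alloc_cpumask_var(&tmp, GFP_KERNEL))
                return -ENOMEM;

        cpumask_and(tmp, in_mask, allowed);
        /* ... use tmp ... */

        free_cpumask_var(tmp);  /* empty for !CONFIG_CPUMASK_OFFSTACK */
        return 0;
}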
-rw-r--r--  kernel/sched.c  |  40
1 file changed, 27 insertions(+), 13 deletions(-)
diff --git a/kernel/sched.c b/kernel/sched.c
index f7dee2029e4d..2d4ff91e0c97 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5378,8 +5378,7 @@ out_unlock:
 
 long sched_setaffinity(pid_t pid, const cpumask_t *in_mask)
 {
-        cpumask_t cpus_allowed;
-        cpumask_t new_mask = *in_mask;
+        cpumask_var_t cpus_allowed, new_mask;
         struct task_struct *p;
         int retval;
 
@@ -5401,6 +5400,14 @@ long sched_setaffinity(pid_t pid, const cpumask_t *in_mask)
         get_task_struct(p);
         read_unlock(&tasklist_lock);
 
+        if (!alloc_cpumask_var(&cpus_allowed, GFP_KERNEL)) {
+                retval = -ENOMEM;
+                goto out_put_task;
+        }
+        if (!alloc_cpumask_var(&new_mask, GFP_KERNEL)) {
+                retval = -ENOMEM;
+                goto out_free_cpus_allowed;
+        }
         retval = -EPERM;
         if ((current->euid != p->euid) && (current->euid != p->uid) &&
                         !capable(CAP_SYS_NICE))
@@ -5410,24 +5417,28 @@ long sched_setaffinity(pid_t pid, const cpumask_t *in_mask)
         if (retval)
                 goto out_unlock;
 
-        cpuset_cpus_allowed(p, &cpus_allowed);
-        cpus_and(new_mask, new_mask, cpus_allowed);
+        cpuset_cpus_allowed(p, cpus_allowed);
+        cpumask_and(new_mask, in_mask, cpus_allowed);
 again:
-        retval = set_cpus_allowed_ptr(p, &new_mask);
+        retval = set_cpus_allowed_ptr(p, new_mask);
 
         if (!retval) {
-                cpuset_cpus_allowed(p, &cpus_allowed);
-                if (!cpus_subset(new_mask, cpus_allowed)) {
+                cpuset_cpus_allowed(p, cpus_allowed);
+                if (!cpumask_subset(new_mask, cpus_allowed)) {
                         /*
                          * We must have raced with a concurrent cpuset
                          * update. Just reset the cpus_allowed to the
                          * cpuset's cpus_allowed
                          */
-                        new_mask = cpus_allowed;
+                        cpumask_copy(new_mask, cpus_allowed);
                         goto again;
                 }
         }
 out_unlock:
+        free_cpumask_var(new_mask);
+out_free_cpus_allowed:
+        free_cpumask_var(cpus_allowed);
+out_put_task:
         put_task_struct(p);
         put_online_cpus();
         return retval;
@@ -5453,14 +5464,17 @@ static int get_user_cpu_mask(unsigned long __user *user_mask_ptr, unsigned len,
 asmlinkage long sys_sched_setaffinity(pid_t pid, unsigned int len,
                                       unsigned long __user *user_mask_ptr)
 {
-        cpumask_t new_mask;
+        cpumask_var_t new_mask;
         int retval;
 
-        retval = get_user_cpu_mask(user_mask_ptr, len, &new_mask);
-        if (retval)
-                return retval;
+        if (!alloc_cpumask_var(&new_mask, GFP_KERNEL))
+                return -ENOMEM;
 
-        return sched_setaffinity(pid, &new_mask);
+        retval = get_user_cpu_mask(user_mask_ptr, len, new_mask);
+        if (retval == 0)
+                retval = sched_setaffinity(pid, new_mask);
+        free_cpumask_var(new_mask);
+        return retval;
 }
 
 long sched_getaffinity(pid_t pid, cpumask_t *mask)
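The error-unwinding labels added to sched_setaffinity() follow the usual kernel
goto-ladder: each failure branch jumps to the label that releases exactly what
has been acquired so far, so resources are freed in reverse order of
acquisition. A self-contained userspace sketch of the same shape (hypothetical
names; malloc/free stand in for alloc_cpumask_var/free_cpumask_var):

#include <stdio.h>
#include <stdlib.h>

static int do_work(void)
{
        char *cpus_allowed, *new_mask;
        int ret = -1;

        cpus_allowed = malloc(64);
        if (!cpus_allowed)
                goto out;                       /* nothing acquired yet */
        new_mask = malloc(64);
        if (!new_mask)
                goto out_free_cpus_allowed;     /* only cpus_allowed held */

        /* ... work with both buffers ... */
        ret = 0;

        free(new_mask);                         /* success path falls through */
out_free_cpus_allowed:
        free(cpus_allowed);
out:
        return ret;
}

int main(void)
{
        printf("do_work() = %d\n", do_work());
        return 0;
}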