author     David Rientjes <rientjes@google.com>            2008-04-28 02:12:34 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2008-04-28 08:58:20 -0700
commit     3e1f064562fcff7bf3856bc1d00dfa84d4f121cc
tree       9ebc17449238ab5284b72f634405044376dc816b /mm/mempolicy.c
parent     3842b46de626d1a3c44ad280d67ab0a4dc047d13
mempolicy: disallow static or relative flags for local preferred mode
MPOL_F_STATIC_NODES and MPOL_F_RELATIVE_NODES don't mean anything for
MPOL_PREFERRED policies that were created with an empty nodemask (for purely
local allocations). Such policies are never invalidated when a task's allowed
mems change, and they never need to be rebound relative to a cpuset's placement.
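
From user space, the combination this patch disallows looks like the sketch
below. This is illustrative only, not part of the patch: it assumes libnuma's
<numaif.h> for set_mempolicy() (link with -lnuma), and defines
MPOL_F_STATIC_NODES locally in case the installed headers predate 2.6.26.

    #include <stdio.h>
    #include <numaif.h>                     /* set_mempolicy(), MPOL_PREFERRED */

    #ifndef MPOL_F_STATIC_NODES
    #define MPOL_F_STATIC_NODES (1 << 15)   /* value from linux/mempolicy.h */
    #endif

    int main(void)
    {
            /*
             * An empty (NULL) nodemask combined with a mode flag is a
             * meaningless request; with this patch the kernel rejects it.
             */
            if (set_mempolicy(MPOL_PREFERRED | MPOL_F_STATIC_NODES, NULL, 0) < 0)
                    perror("set_mempolicy");        /* expected: Invalid argument */
            return 0;
    }

With the patch applied this call fails with EINVAL; the same flags with a
non-empty nodemask are still accepted.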
Also fixes a bug, identified by Lee Schermerhorn, that prevented an empty
nodemask from being passed to MPOL_PREFERRED to request local allocation.  [A
different, somewhat incomplete, patch already existed in 25-rc5-mm1.]
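
Conversely, the case the bug fix re-enables, again as a hedged user-space
sketch under the same libnuma assumption: an explicitly empty nodemask passed
to MPOL_PREFERRED with no mode flags is accepted and means purely local
allocation.

    #include <stdio.h>
    #include <numaif.h>             /* set_mempolicy(), MPOL_PREFERRED */

    int main(void)
    {
            unsigned long empty = 0;        /* an explicitly empty nodemask */

            /*
             * Empty nodemask, no mode flags: accepted and interpreted as
             * purely local allocation (prefer the node of the allocating CPU).
             */
            if (set_mempolicy(MPOL_PREFERRED, &empty, sizeof(empty) * 8) == 0)
                    printf("local allocation policy installed\n");
            else
                    perror("set_mempolicy");
            return 0;
    }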
Cc: Paul Jackson <pj@sgi.com>
Cc: Christoph Lameter <clameter@sgi.com>
Cc: Lee Schermerhorn <Lee.Schermerhorn@hp.com>
Cc: Andi Kleen <ak@suse.de>
Cc: Randy Dunlap <randy.dunlap@oracle.com>
Signed-off-by: Lee Schermerhorn <lee.schermerhorn@hp.com>
Signed-off-by: David Rientjes <rientjes@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm/mempolicy.c')
-rw-r--r--  mm/mempolicy.c | 42
1 file changed, 26 insertions(+), 16 deletions(-)
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index a94d994eaaa8..c1b907789d84 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -181,27 +181,43 @@ static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
 {
         struct mempolicy *policy;
         nodemask_t cpuset_context_nmask;
-        int localalloc = 0;
         int ret;
 
         pr_debug("setting mode %d flags %d nodes[0] %lx\n",
                  mode, flags, nodes ? nodes_addr(*nodes)[0] : -1);
 
-        if (mode == MPOL_DEFAULT)
-                return NULL;
-        if (!nodes || nodes_empty(*nodes)) {
-                if (mode != MPOL_PREFERRED)
+        if (mode == MPOL_DEFAULT) {
+                if (nodes && !nodes_empty(*nodes))
                         return ERR_PTR(-EINVAL);
-                localalloc = 1;        /* special case: no mode flags */
+                return NULL;
         }
+        VM_BUG_ON(!nodes);
+
+        /*
+         * MPOL_PREFERRED cannot be used with MPOL_F_STATIC_NODES or
+         * MPOL_F_RELATIVE_NODES if the nodemask is empty (local allocation).
+         * All other modes require a valid pointer to a non-empty nodemask.
+         */
+        if (mode == MPOL_PREFERRED) {
+                if (nodes_empty(*nodes)) {
+                        if (((flags & MPOL_F_STATIC_NODES) ||
+                             (flags & MPOL_F_RELATIVE_NODES)))
+                                return ERR_PTR(-EINVAL);
+                        nodes = NULL;        /* flag local alloc */
+                }
+        } else if (nodes_empty(*nodes))
+                return ERR_PTR(-EINVAL);
         policy = kmem_cache_alloc(policy_cache, GFP_KERNEL);
         if (!policy)
                 return ERR_PTR(-ENOMEM);
         atomic_set(&policy->refcnt, 1);
         policy->policy = mode;
+        policy->flags = flags;
 
-        if (!localalloc) {
-                policy->flags = flags;
+        if (nodes) {
+                /*
+                 * cpuset related setup doesn't apply to local allocation
+                 */
                 cpuset_update_task_memory_state();
                 if (flags & MPOL_F_RELATIVE_NODES)
                         mpol_relative_nodemask(&cpuset_context_nmask, nodes,
@@ -217,7 +233,7 @@ static struct mempolicy *mpol_new(unsigned short mode, unsigned short flags,
         }
 
         ret = mpol_ops[mode].create(policy,
-                                localalloc ? NULL : &cpuset_context_nmask);
+                                nodes ? &cpuset_context_nmask : NULL);
         if (ret < 0) {
                 kmem_cache_free(policy_cache, policy);
                 return ERR_PTR(ret);
@@ -259,10 +275,6 @@ static void mpol_rebind_preferred(struct mempolicy *pol,
 {
         nodemask_t tmp;
 
-        /*
-         * check 'STATIC_NODES first, as preferred_node == -1 may be
-         * a temporary, "fallback" state for this policy.
-         */
         if (pol->flags & MPOL_F_STATIC_NODES) {
                 int node = first_node(pol->w.user_nodemask);
 
@@ -270,12 +282,10 @@ static void mpol_rebind_preferred(struct mempolicy *pol,
                         pol->v.preferred_node = node;
                 else
                         pol->v.preferred_node = -1;
-        } else if (pol->v.preferred_node == -1) {
-                return;        /* no remap required for explicit local alloc */
         } else if (pol->flags & MPOL_F_RELATIVE_NODES) {
                 mpol_relative_nodemask(&tmp, &pol->w.user_nodemask, nodes);
                 pol->v.preferred_node = first_node(tmp);
-        } else {
+        } else if (pol->v.preferred_node != -1) {
                 pol->v.preferred_node = node_remap(pol->v.preferred_node,
                                                    pol->w.cpuset_mems_allowed,
                                                    *nodes);