author		Oleg Nesterov <oleg@redhat.com>	2014-10-09 15:27:45 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2014-10-09 22:25:56 -0400
commit		8d90274b3b118c9babeefb1302947f33a1364fb5 (patch)
tree		d500cea8a52cf24049f3f41f4ed24ba2f5d3c71a /mm/mempolicy.c
parent		f15ca78e33b0bb5acc0c5d9a5d5be3c55c4f0bb7 (diff)
mempolicy: sanitize the usage of get_task_policy()
Cleanup + preparation. Every user of get_task_policy() calls it
unconditionally, even if it is not going to use the result.
get_task_policy() is cheap but still this does not look clean, plus the
code looks simpler if get_task_policy() is called only when this is
really needed.

Note: I hope this is correct, but it is not clear why vma_policy_mof()
doesn't fall back to get_task_policy() if ->get_policy() returns NULL.

Signed-off-by: Oleg Nesterov <oleg@redhat.com>
Cc: KAMEZAWA Hiroyuki <kamezawa.hiroyu@jp.fujitsu.com>
Cc: David Rientjes <rientjes@google.com>
Cc: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Cc: Alexander Viro <viro@zeniv.linux.org.uk>
Cc: Cyrill Gorcunov <gorcunov@openvz.org>
Cc: "Eric W. Biederman" <ebiederm@xmission.com>
Cc: "Kirill A. Shutemov" <kirill.shutemov@linux.intel.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Hugh Dickins <hughd@google.com>
Cc: Andi Kleen <andi@firstfloor.org>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
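[Editor's sketch] The fallback order this patch establishes in get_vma_policy() can be modelled as plain user-space C. Everything below (the *_stub/_sketch types, helpers and main()) is a simplified stand-in for illustration only, not the kernel's definitions:

#include <stdio.h>

/*
 * Sketch of the post-patch pattern: consult the VMA-level policy sources
 * first, and call the task-policy helper only when none of them produced
 * a policy.  All types here are stand-ins, not mm/mempolicy.c code.
 */
struct mempolicy { const char *origin; };

struct vm_ops_stub {
	struct mempolicy *(*get_policy)(void *vma, unsigned long addr);
};

struct vma_stub {
	struct vm_ops_stub *vm_ops;	/* e.g. a file-backed mapping's hook */
	struct mempolicy *vm_policy;	/* policy attached with mbind()      */
};

static struct mempolicy task_pol = { "task policy" };

/* Stand-in for get_task_policy(); reached only on the fallback path. */
static struct mempolicy *get_task_policy_stub(void *task)
{
	(void)task;
	return &task_pol;
}

static struct mempolicy *get_vma_policy_sketch(void *task,
					       struct vma_stub *vma,
					       unsigned long addr)
{
	struct mempolicy *pol = NULL;

	if (vma) {
		if (vma->vm_ops && vma->vm_ops->get_policy)
			pol = vma->vm_ops->get_policy(vma, addr);
		else if (vma->vm_policy)
			pol = vma->vm_policy;
	}

	if (!pol)			/* lazy fallback: only when needed */
		pol = get_task_policy_stub(task);

	return pol;
}

int main(void)
{
	struct vma_stub anon_vma = { NULL, NULL };	/* no VMA policy set */
	struct mempolicy vma_pol = { "vma policy" };
	struct vma_stub bound_vma = { NULL, &vma_pol };

	printf("%s\n", get_vma_policy_sketch(NULL, &anon_vma, 0)->origin);
	printf("%s\n", get_vma_policy_sketch(NULL, &bound_vma, 0)->origin);
	return 0;
}

The point of the pattern is simply that the task-policy lookup runs only on the path where its result is actually used, which is what the hunks below do inside get_vma_policy() and vma_policy_mof().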
Diffstat (limited to 'mm/mempolicy.c')
-rw-r--r--	mm/mempolicy.c	25
1 file changed, 14 insertions(+), 11 deletions(-)
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 656db97584f0..b86b08e77b8d 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1621,14 +1621,11 @@ COMPAT_SYSCALL_DEFINE6(mbind, compat_ulong_t, start, compat_ulong_t, len,
 struct mempolicy *get_vma_policy(struct task_struct *task,
 		struct vm_area_struct *vma, unsigned long addr)
 {
-	struct mempolicy *pol = get_task_policy(task);
+	struct mempolicy *pol = NULL;
 
 	if (vma) {
 		if (vma->vm_ops && vma->vm_ops->get_policy) {
-			struct mempolicy *vpol = vma->vm_ops->get_policy(vma,
-									addr);
-			if (vpol)
-				pol = vpol;
+			pol = vma->vm_ops->get_policy(vma, addr);
 		} else if (vma->vm_policy) {
 			pol = vma->vm_policy;
 
@@ -1643,12 +1640,15 @@ struct mempolicy *get_vma_policy(struct task_struct *task,
 		}
 	}
 
+	if (!pol)
+		pol = get_task_policy(task);
+
 	return pol;
 }
 
 bool vma_policy_mof(struct task_struct *task, struct vm_area_struct *vma)
 {
-	struct mempolicy *pol = get_task_policy(task);
+	struct mempolicy *pol = NULL;
 
 	if (vma) {
 		if (vma->vm_ops && vma->vm_ops->get_policy) {
@@ -1660,11 +1660,14 @@ bool vma_policy_mof(struct task_struct *task, struct vm_area_struct *vma)
 			mpol_cond_put(pol);
 
 			return ret;
-		} else if (vma->vm_policy) {
-			pol = vma->vm_policy;
 		}
+
+		pol = vma->vm_policy;
 	}
 
+	if (!pol)
+		pol = get_task_policy(task);
+
 	return pol->flags & MPOL_F_MOF;
 }
 
@@ -2068,12 +2071,12 @@ retry_cpuset:
  */
 struct page *alloc_pages_current(gfp_t gfp, unsigned order)
 {
-	struct mempolicy *pol = get_task_policy(current);
+	struct mempolicy *pol = &default_policy;
 	struct page *page;
 	unsigned int cpuset_mems_cookie;
 
-	if (in_interrupt() || (gfp & __GFP_THISNODE))
-		pol = &default_policy;
+	if (!in_interrupt() && !(gfp & __GFP_THISNODE))
+		pol = get_task_policy(current);
 
 retry_cpuset:
 	cpuset_mems_cookie = read_mems_allowed_begin();
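
[Editor's sketch] The last hunk gives alloc_pages_current() the same shape: start from &default_policy and look up the task policy only when the allocation context can honour it, i.e. not in interrupt context and not a __GFP_THISNODE request. A condensed restatement of that control flow, with illustrative stand-in names rather than the kernel API:

struct mempolicy;	/* opaque in this sketch */

/*
 * Post-patch decision in alloc_pages_current(), restated: the default
 * policy is the starting point, and the task policy is fetched only when
 * it could actually be used.  The parameters and the _stub suffix are
 * illustrative, not kernel interfaces.
 */
struct mempolicy *pick_alloc_policy_stub(int in_interrupt,
					 int gfp_thisnode,
					 struct mempolicy *default_pol,
					 struct mempolicy *(*task_policy)(void))
{
	struct mempolicy *pol = default_pol;

	if (!in_interrupt && !gfp_thisnode)
		pol = task_policy();	/* fetched only when it will be used */

	return pol;
}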