author		Michal Hocko <mhocko@suse.com>			2018-01-31 16:21:03 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-01-31 17:18:40 -0800
commit		389c8178d0904f944887ccca2256ff9d79c12e8e (patch)
tree		b4b089a0b4406d0215237ecc28f4f34c14958a70
parent		ebd637235890a3fa6a6d4bb57522098f2f59c693 (diff)
hugetlb, mbind: fall back to default policy if vma is NULL
Dan Carpenter has noticed that the mbind migration callback (new_page)
can get a NULL vma pointer and choke on it inside alloc_huge_page_vma,
which relies on the VMA to get the hstate. We used to BUG_ON this case,
but the BUG_ON has been removed recently by "hugetlb, mempolicy: fix
the mbind hugetlb migration".
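
For illustration, this is roughly where the NULL pointer blew up (a
sketch of the pre-fix path, based on the lines removed in the diff
below; hstate_vma() reads vma->vm_file, so it cannot tolerate a NULL
vma):

	/* Pre-fix alloc_huge_page_vma(): the hstate came from the VMA
	 * itself, so a NULL vma from the migration callback oopses here,
	 * before huge_node() ever gets a chance to cope with it. */
	h = hstate_vma(vma);		/* NULL vma -> NULL dereference */
	gfp_mask = htlb_alloc_mask(h);
	node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);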
The proper way to handle this is to get the hstate from the migrated
page and rely on huge_node (resp. get_vma_policy) to do the right thing
with a NULL VMA. We currently fall back to the default mempolicy in
that case, which is in line with what the THP path does here.
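
The shape of the fix in the caller, as a short sketch (it mirrors the
mm/mempolicy.c hunk below): the hstate is derived from the page under
migration, and the possibly-NULL vma is passed through only for the
policy lookup, where a NULL VMA already means "use the default policy":

	if (PageHuge(page)) {
		/* Take the hstate from the page being migrated rather
		 * than from the VMA; vma may legitimately be NULL here,
		 * in which case huge_node() falls back to the default
		 * mempolicy. */
		return alloc_huge_page_vma(page_hstate(compound_head(page)),
				vma, address);
	}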
Link: http://lkml.kernel.org/r/20180110104712.GR1732@dhcp22.suse.cz
Signed-off-by: Michal Hocko <mhocko@suse.com>
Reported-by: Dan Carpenter <dan.carpenter@oracle.com>
Cc: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Mike Kravetz <mike.kravetz@oracle.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
-rw-r--r--	include/linux/hugetlb.h	5
-rw-r--r--	mm/hugetlb.c		5
-rw-r--r--	mm/mempolicy.c		3
3 files changed, 7 insertions, 6 deletions
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 612a29b7f6c6..36fa6a2a82e3 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -358,7 +358,8 @@ struct page *alloc_huge_page(struct vm_area_struct *vma,
 struct page *alloc_huge_page_node(struct hstate *h, int nid);
 struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
 				nodemask_t *nmask);
-struct page *alloc_huge_page_vma(struct vm_area_struct *vma, unsigned long address);
+struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
+				unsigned long address);
 int huge_add_to_page_cache(struct page *page, struct address_space *mapping,
 			pgoff_t idx);
 
@@ -536,7 +537,7 @@ struct hstate {};
 #define alloc_huge_page(v, a, r) NULL
 #define alloc_huge_page_node(h, nid) NULL
 #define alloc_huge_page_nodemask(h, preferred_nid, nmask) NULL
-#define alloc_huge_page_vma(vma, address) NULL
+#define alloc_huge_page_vma(h, vma, address) NULL
 #define alloc_bootmem_huge_page(h) NULL
 #define hstate_file(f) NULL
 #define hstate_sizelog(s) NULL
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 742a929f2311..7c204e3d132b 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1675,16 +1675,15 @@ struct page *alloc_huge_page_nodemask(struct hstate *h, int preferred_nid,
 }
 
 /* mempolicy aware migration callback */
-struct page *alloc_huge_page_vma(struct vm_area_struct *vma, unsigned long address)
+struct page *alloc_huge_page_vma(struct hstate *h, struct vm_area_struct *vma,
+				unsigned long address)
 {
 	struct mempolicy *mpol;
 	nodemask_t *nodemask;
 	struct page *page;
-	struct hstate *h;
 	gfp_t gfp_mask;
 	int node;
 
-	h = hstate_vma(vma);
 	gfp_mask = htlb_alloc_mask(h);
 	node = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
 	page = alloc_huge_page_nodemask(h, node, nodemask);
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index 96823fa07f38..d879f1d8a44a 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1121,7 +1121,8 @@ static struct page *new_page(struct page *page, unsigned long start, int **x)
 	}
 
 	if (PageHuge(page)) {
-		return alloc_huge_page_vma(vma, address);
+		return alloc_huge_page_vma(page_hstate(compound_head(page)),
+				vma, address);
 	} else if (thp_migration_supported() && PageTransHuge(page)) {
 		struct page *thp;
 