path: root/mm/hugetlb.c
author     Oscar Salvador <osalvador@suse.de>  2019-05-13 17:19:23 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>  2019-05-14 09:47:48 -0700
commit     2d0adf7e0d7ac1e18da874c5b19ef30a0db59658 (patch)
tree       85bc0616c14208c09e535f7dbeea174fa72d399e /mm/hugetlb.c
parent     fd875dca7c71744cbb0ebbcde7d45e5ee05b7637 (diff)
mm/hugetlb: get rid of NODEMASK_ALLOC
NODEMASK_ALLOC is used to allocate a nodemask bitmap, and it does so by first determining whether the bitmap should live on the stack or be allocated dynamically, depending on NODES_SHIFT. Right now, it takes the dynamic path whenever the nodemask_t is above 32 bytes.

Although we could bump that threshold to a more reasonable value, the largest a nodemask_t can get is 128 bytes, and __nr_hugepages_store_common is called from a rather shallow stack, so we can simply get rid of the NODEMASK_ALLOC call here. This reduces code churn and complexity.

Link: http://lkml.kernel.org/r/20190402133415.21983-1-osalvador@suse.de
Signed-off-by: Oscar Salvador <osalvador@suse.de>
Reviewed-by: Mike Kravetz <mike.kravetz@oracle.com>
Reviewed-by: Naoya Horiguchi <n-horiguchi@ah.jp.nec.com>
Cc: Alex Ghiti <alex@ghiti.fr>
Cc: David Rientjes <rientjes@google.com>
Cc: Jing Xiangfeng <jingxiangfeng@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
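For context, a sketch of the stack-versus-heap decision the message describes, paraphrased from the NODEMASK_ALLOC/NODEMASK_FREE helpers in include/linux/nodemask.h around this kernel release; the threshold comment and formatting are approximate, not a verbatim quote:

/* Paraphrase of include/linux/nodemask.h -- not a verbatim copy. */
#if NODES_SHIFT > 8 /* nodemask_t larger than 32 bytes, i.e. more than 256 nodes */
#define NODEMASK_ALLOC(type, name, gfp_flags)	\
			type *name = kmalloc(sizeof(*name), gfp_flags)
#define NODEMASK_FREE(m)	kfree(m)
#else
/* small mask: declare it on the stack, freeing is a no-op */
#define NODEMASK_ALLOC(type, name, gfp_flags)	type _##name, *name = &_##name
#define NODEMASK_FREE(m)	do {} while (0)
#endif

Since a nodemask_t never exceeds 128 bytes (1024 nodes) and the sysfs store path is shallow, an on-stack nodemask_t in __nr_hugepages_store_common is always safe, which is what lets this patch drop both the dynamic allocation and the -ENOMEM fallback.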
Diffstat (limited to 'mm/hugetlb.c')
-rw-r--r--  mm/hugetlb.c  36
1 file changed, 11 insertions(+), 25 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index a81f2a8556c8..2b0abc30685d 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -2448,44 +2448,30 @@ static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
 					   unsigned long count, size_t len)
 {
 	int err;
-	NODEMASK_ALLOC(nodemask_t, nodes_allowed, GFP_KERNEL | __GFP_NORETRY);
+	nodemask_t nodes_allowed, *n_mask;
 
-	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported()) {
-		err = -EINVAL;
-		goto out;
-	}
+	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
+		return -EINVAL;
 
 	if (nid == NUMA_NO_NODE) {
 		/*
 		 * global hstate attribute
 		 */
 		if (!(obey_mempolicy &&
-				init_nodemask_of_mempolicy(nodes_allowed))) {
-			NODEMASK_FREE(nodes_allowed);
-			nodes_allowed = &node_states[N_MEMORY];
-		}
-	} else if (nodes_allowed) {
+				init_nodemask_of_mempolicy(&nodes_allowed)))
+			n_mask = &node_states[N_MEMORY];
+		else
+			n_mask = &nodes_allowed;
+	} else {
 		/*
 		 * Node specific request. count adjustment happens in
 		 * set_max_huge_pages() after acquiring hugetlb_lock.
 		 */
-		init_nodemask_of_node(nodes_allowed, nid);
-	} else {
-		/*
-		 * Node specific request, but we could not allocate the few
-		 * words required for a node mask. We are unlikely to hit
-		 * this condition. Since we can not pass down the appropriate
-		 * node mask, just return ENOMEM.
-		 */
-		err = -ENOMEM;
-		goto out;
+		init_nodemask_of_node(&nodes_allowed, nid);
+		n_mask = &nodes_allowed;
 	}
 
-	err = set_max_huge_pages(h, count, nid, nodes_allowed);
-
-out:
-	if (nodes_allowed != &node_states[N_MEMORY])
-		NODEMASK_FREE(nodes_allowed);
+	err = set_max_huge_pages(h, count, nid, n_mask);
 
 	return err ? err : len;
 }
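
For readability, here is how the function reads with this patch applied, reconstructed from the hunk above; the two parameter lines that fall outside the hunk context (struct hstate *h and int nid) are filled in from the function body, so treat the signature spacing as approximate:

static ssize_t __nr_hugepages_store_common(bool obey_mempolicy,
					   struct hstate *h, int nid,
					   unsigned long count, size_t len)
{
	int err;
	nodemask_t nodes_allowed, *n_mask;

	if (hstate_is_gigantic(h) && !gigantic_page_runtime_supported())
		return -EINVAL;

	if (nid == NUMA_NO_NODE) {
		/*
		 * global hstate attribute
		 */
		if (!(obey_mempolicy &&
				init_nodemask_of_mempolicy(&nodes_allowed)))
			n_mask = &node_states[N_MEMORY];
		else
			n_mask = &nodes_allowed;
	} else {
		/*
		 * Node specific request. count adjustment happens in
		 * set_max_huge_pages() after acquiring hugetlb_lock.
		 */
		init_nodemask_of_node(&nodes_allowed, nid);
		n_mask = &nodes_allowed;
	}

	err = set_max_huge_pages(h, count, nid, n_mask);

	return err ? err : len;
}

The node-specific -ENOMEM branch is gone entirely: an on-stack nodemask cannot fail to exist, so the only remaining error sources are the gigantic-page check and set_max_huge_pages() itself.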