author	Wei Yang <richard.weiyang@gmail.com>	2018-12-28 00:37:02 -0800
committer	Linus Torvalds <torvalds@linux-foundation.org>	2018-12-28 12:11:49 -0800
commit	83af658898cb292a32d8b6cd9b51266d7cfc4b6a (patch)
tree	7a6c4daecefff22c007fb1326f10737a17920eb2
parent	9e247bab0668a5893b3efa131cec5b5859467834 (diff)
mm, sparse: drop pgdat_resize_lock in sparse_add/remove_one_section()
pgdat_resize_lock is used to protect pgdat's memory region information, like node_start_pfn, node_present_pages, etc.  In sparse_add/remove_one_section(), however, it is used to protect the initialization/release of one mem_section, which is not what the lock is for.  These code paths are already serialized by mem_hotplug_lock, and should there ever be a reason for locking at the sparse layer, a dedicated lock should be introduced.

Following is the current call trace of sparse_add/remove_one_section():

    mem_hotplug_begin()
    arch_add_memory()
       add_pages()
           __add_pages()
               __add_section()
                   sparse_add_one_section()
    mem_hotplug_done()

    mem_hotplug_begin()
    arch_remove_memory()
        __remove_pages()
            __remove_section()
                sparse_remove_one_section()
    mem_hotplug_done()

The comment above pgdat_resize_lock also mentions "Holding this will also guarantee that any pfn_valid() stays that way."  That guarantee formally holds with the current implementation and no longer holds after this patch, but nothing relies on it: there are no pfn walkers that take the lock, so it looks like a relic from the past.  This patch removes that part of the comment as well.

[richard.weiyang@gmail.com: v4]
  Link: http://lkml.kernel.org/r/20181204085657.20472-1-richard.weiyang@gmail.com
[mhocko@suse.com: changelog suggestion]
  Link: http://lkml.kernel.org/r/20181128091243.19249-1-richard.weiyang@gmail.com
Signed-off-by: Wei Yang <richard.weiyang@gmail.com>
Reviewed-by: David Hildenbrand <david@redhat.com>
Acked-by: Michal Hocko <mhocko@suse.com>
Cc: Dave Hansen <dave.hansen@intel.com>
Cc: Oscar Salvador <osalvador@suse.de>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
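To illustrate the serialization argument above, here is a minimal user-space model (plain C with pthreads, not kernel code; the mutex merely stands in for mem_hotplug_begin()/mem_hotplug_done()): because every sparse_add/remove_one_section() call already runs with the hotplug lock held, an outer lock alone keeps section setup and teardown mutually exclusive, so an inner lock in those paths adds nothing.

/*
 * Minimal user-space sketch of the serialization argument; the names below
 * only mirror the kernel call trace, they are not kernel functions.
 * Build: cc -pthread -o hotplug_model hotplug_model.c
 */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t mem_hotplug_lock = PTHREAD_MUTEX_INITIALIZER;
static int section_present;	/* models one mem_section's presence state */

/* models sparse_add_one_section(): no inner lock, the caller serializes */
static void add_one_section(void)
{
	section_present = 1;
}

/* models sparse_remove_one_section(): likewise relies on the caller */
static void remove_one_section(void)
{
	section_present = 0;
}

static void *hotplug_add(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&mem_hotplug_lock);		/* mem_hotplug_begin() */
	add_one_section();
	pthread_mutex_unlock(&mem_hotplug_lock);	/* mem_hotplug_done() */
	return NULL;
}

static void *hotplug_remove(void *arg)
{
	(void)arg;
	pthread_mutex_lock(&mem_hotplug_lock);		/* mem_hotplug_begin() */
	remove_one_section();
	pthread_mutex_unlock(&mem_hotplug_lock);	/* mem_hotplug_done() */
	return NULL;
}

int main(void)
{
	pthread_t t_add, t_del;

	pthread_create(&t_add, NULL, hotplug_add, NULL);
	pthread_create(&t_del, NULL, hotplug_remove, NULL);
	pthread_join(t_add, NULL);
	pthread_join(t_del, NULL);
	/* whichever thread wins the outer lock runs to completion first */
	printf("section_present = %d\n", section_present);
	return 0;
}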
-rw-r--r--	include/linux/mmzone.h	3
-rw-r--r--	mm/sparse.c	9
2 files changed, 2 insertions, 10 deletions
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index a6e300732ec7..fc4b5cdb6c2d 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -637,8 +637,7 @@ typedef struct pglist_data {
 #if defined(CONFIG_MEMORY_HOTPLUG) || defined(CONFIG_DEFERRED_STRUCT_PAGE_INIT)
 	/*
 	 * Must be held any time you expect node_start_pfn, node_present_pages
-	 * or node_spanned_pages stay constant. Holding this will also
-	 * guarantee that any pfn_valid() stays that way.
+	 * or node_spanned_pages stay constant.
 	 *
 	 * pgdat_resize_lock() and pgdat_resize_unlock() are provided to
 	 * manipulate node_size_lock without checking for CONFIG_MEMORY_HOTPLUG
diff --git a/mm/sparse.c b/mm/sparse.c
index 691544a2814c..7323a03fbc39 100644
--- a/mm/sparse.c
+++ b/mm/sparse.c
@@ -685,7 +685,6 @@ int __meminit sparse_add_one_section(struct pglist_data *pgdat,
 	struct mem_section *ms;
 	struct page *memmap;
 	unsigned long *usemap;
-	unsigned long flags;
 	int ret;
 
 	/*
@@ -705,8 +704,6 @@ int __meminit sparse_add_one_section(struct pglist_data *pgdat,
 		return -ENOMEM;
 	}
 
-	pgdat_resize_lock(pgdat, &flags);
-
 	ms = __pfn_to_section(start_pfn);
 	if (ms->section_mem_map & SECTION_MARKED_PRESENT) {
 		ret = -EEXIST;
@@ -723,7 +720,6 @@ int __meminit sparse_add_one_section(struct pglist_data *pgdat,
 	sparse_init_one_section(ms, section_nr, memmap, usemap);
 
 out:
-	pgdat_resize_unlock(pgdat, &flags);
 	if (ret < 0) {
 		kfree(usemap);
 		__kfree_section_memmap(memmap, altmap);
@@ -794,10 +790,8 @@ void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
 		unsigned long map_offset, struct vmem_altmap *altmap)
 {
 	struct page *memmap = NULL;
-	unsigned long *usemap = NULL, flags;
-	struct pglist_data *pgdat = zone->zone_pgdat;
+	unsigned long *usemap = NULL;
 
-	pgdat_resize_lock(pgdat, &flags);
 	if (ms->section_mem_map) {
 		usemap = ms->pageblock_flags;
 		memmap = sparse_decode_mem_map(ms->section_mem_map,
@@ -805,7 +799,6 @@ void sparse_remove_one_section(struct zone *zone, struct mem_section *ms,
 		ms->section_mem_map = 0;
 		ms->pageblock_flags = NULL;
 	}
-	pgdat_resize_unlock(pgdat, &flags);
 
 	clear_hwpoisoned_pages(memmap + map_offset,
 			PAGES_PER_SECTION - map_offset);
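For contrast, the comment that remains in mmzone.h describes what pgdat_resize_lock is actually for: keeping node_start_pfn, node_present_pages and node_spanned_pages stable.  A sketch of that usage pattern follows; the helper name example_grow_node_span() and the span arithmetic are illustrative assumptions, not code from this patch, and the snippet only builds in a kernel context.

/*
 * Illustrative sketch only: pgdat_resize_lock() guarding the node span
 * fields, which is the case the remaining comment documents.
 */
#include <linux/kernel.h>
#include <linux/mmzone.h>
#include <linux/memory_hotplug.h>

static void example_grow_node_span(struct pglist_data *pgdat,
				   unsigned long start_pfn,
				   unsigned long nr_pages)
{
	unsigned long flags;
	unsigned long old_end_pfn;

	pgdat_resize_lock(pgdat, &flags);
	old_end_pfn = pgdat_end_pfn(pgdat);
	/* extend the node start downwards if the new range begins earlier */
	if (!pgdat->node_spanned_pages || start_pfn < pgdat->node_start_pfn)
		pgdat->node_start_pfn = start_pfn;
	/* recompute the span so it covers both the old end and the new range */
	pgdat->node_spanned_pages = max(start_pfn + nr_pages, old_end_pfn) -
				    pgdat->node_start_pfn;
	pgdat_resize_unlock(pgdat, &flags);
}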