author     Tejun Heo <tj@kernel.org>              2011-07-14 09:46:10 +0200
committer  H. Peter Anvin <hpa@linux.intel.com>   2011-07-14 11:45:29 -0700
commit     5dfe8660a3d7f1ee1265c3536433ee53da3f98a3 (patch)
tree       c58232b88741ba1d8cce417b62f3f658369ad9c2
parent     fc769a8e70a3348d5de49e5f69f6aff810157360 (diff)
bootmem: Replace work_with_active_regions() with for_each_mem_pfn_range()
Callback-based iteration is cumbersome and much less useful than a
for_each_*() iterator.  This patch implements for_each_mem_pfn_range(),
which replaces work_with_active_regions().  All current users of
work_with_active_regions() are converted.

This simplifies walking over early_node_map and will allow converting
the internal logic in page_alloc to use the iterator instead of walking
early_node_map directly, which in turn will enable moving node
information to memblock.

The powerpc change is only compile-tested.
Signed-off-by: Tejun Heo <tj@kernel.org>
Link: http://lkml.kernel.org/r/20110714074610.GD3455@htj.dyndns.org
Cc: Yinghai Lu <yinghai@kernel.org>
Cc: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
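
The shape of the conversion is easiest to see in count_early_node_map():
the one-off state struct and callback disappear, and the walk becomes an
ordinary loop in which break, continue, and early return work directly.
A condensed before/after sketch, abridged from the x86 hunk below (not a
verbatim excerpt):

	/* Before: state threaded through an opaque void * into a callback;
	 * a non-zero return from the callback aborts the walk. */
	static int __init count_work_fn(unsigned long start_pfn,
					unsigned long end_pfn, void *datax)
	{
		struct count_data *data = datax;

		data->nr++;
		return 0;
	}

	data.nr = 0;
	work_with_active_regions(nodeid, count_work_fn, &data);

	/* After: a plain iterator; a NULL out-pointer means "don't report
	 * this value". */
	int i, cnt = 0;

	for_each_mem_pfn_range(i, nodeid, NULL, NULL, NULL)
		cnt++;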
-rw-r--r--   arch/powerpc/mm/numa.c      50
-rw-r--r--   arch/x86/mm/memblock.c      23
-rw-r--r--   drivers/pci/intel-iommu.c   24
-rw-r--r--   include/linux/mm.h          22
-rw-r--r--   mm/page_alloc.c             40
5 files changed, 76 insertions(+), 83 deletions(-)
diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c
index 2164006fe170..6f06ea53bca2 100644
--- a/arch/powerpc/mm/numa.c
+++ b/arch/powerpc/mm/numa.c
@@ -127,45 +127,25 @@ static int __cpuinit fake_numa_create_new_node(unsigned long end_pfn,
 }
 
 /*
- * get_active_region_work_fn - A helper function for get_node_active_region
- *	Returns datax set to the start_pfn and end_pfn if they contain
- *	the initial value of datax->start_pfn between them
- * @start_pfn: start page(inclusive) of region to check
- * @end_pfn: end page(exclusive) of region to check
- * @datax: comes in with ->start_pfn set to value to search for and
- *	goes out with active range if it contains it
- * Returns 1 if search value is in range else 0
- */
-static int __init get_active_region_work_fn(unsigned long start_pfn,
-					unsigned long end_pfn, void *datax)
-{
-	struct node_active_region *data;
-	data = (struct node_active_region *)datax;
-
-	if (start_pfn <= data->start_pfn && end_pfn > data->start_pfn) {
-		data->start_pfn = start_pfn;
-		data->end_pfn = end_pfn;
-		return 1;
-	}
-	return 0;
-
-}
-
-/*
- * get_node_active_region - Return active region containing start_pfn
+ * get_node_active_region - Return active region containing pfn
  * Active range returned is empty if none found.
- * @start_pfn: The page to return the region for.
- * @node_ar: Returned set to the active region containing start_pfn
+ * @pfn: The page to return the region for
+ * @node_ar: Returned set to the active region containing @pfn
  */
-static void __init get_node_active_region(unsigned long start_pfn,
-		       struct node_active_region *node_ar)
+static void __init get_node_active_region(unsigned long pfn,
+					  struct node_active_region *node_ar)
 {
-	int nid = early_pfn_to_nid(start_pfn);
+	unsigned long start_pfn, end_pfn;
+	int i, nid;
 
-	node_ar->nid = nid;
-	node_ar->start_pfn = start_pfn;
-	node_ar->end_pfn = start_pfn;
-	work_with_active_regions(nid, get_active_region_work_fn, node_ar);
+	for_each_mem_pfn_range(i, MAX_NUMNODES, &start_pfn, &end_pfn, &nid) {
+		if (pfn >= start_pfn && pfn < end_pfn) {
+			node_ar->nid = nid;
+			node_ar->start_pfn = start_pfn;
+			node_ar->end_pfn = end_pfn;
+			break;
+		}
+	}
 }
 
 static void map_cpu_to_node(int cpu, int node)
diff --git a/arch/x86/mm/memblock.c b/arch/x86/mm/memblock.c
index e126117d1b03..da0d5c84586e 100644
--- a/arch/x86/mm/memblock.c
+++ b/arch/x86/mm/memblock.c
@@ -115,28 +115,13 @@ static void __init memblock_x86_subtract_reserved(struct range *range, int az)
 	memblock_reserve_reserved_regions();
 }
 
-struct count_data {
-	int nr;
-};
-
-static int __init count_work_fn(unsigned long start_pfn,
-				unsigned long end_pfn, void *datax)
-{
-	struct count_data *data = datax;
-
-	data->nr++;
-
-	return 0;
-}
-
 static int __init count_early_node_map(int nodeid)
 {
-	struct count_data data;
-
-	data.nr = 0;
-	work_with_active_regions(nodeid, count_work_fn, &data);
+	int i, cnt = 0;
 
-	return data.nr;
+	for_each_mem_pfn_range(i, nodeid, NULL, NULL, NULL)
+		cnt++;
+	return cnt;
 }
 
 int __init __get_free_all_memory_range(struct range **rangep, int nodeid,
diff --git a/drivers/pci/intel-iommu.c b/drivers/pci/intel-iommu.c
index f02c34d26d1b..8ec352077e1a 100644
--- a/drivers/pci/intel-iommu.c
+++ b/drivers/pci/intel-iommu.c
@@ -2178,18 +2178,6 @@ static inline void iommu_prepare_isa(void)
 
 static int md_domain_init(struct dmar_domain *domain, int guest_width);
 
-static int __init si_domain_work_fn(unsigned long start_pfn,
-				    unsigned long end_pfn, void *datax)
-{
-	int *ret = datax;
-
-	*ret = iommu_domain_identity_map(si_domain,
-					 (uint64_t)start_pfn << PAGE_SHIFT,
-					 (uint64_t)end_pfn << PAGE_SHIFT);
-	return *ret;
-
-}
-
 static int __init si_domain_init(int hw)
 {
 	struct dmar_drhd_unit *drhd;
@@ -2221,9 +2209,15 @@ static int __init si_domain_init(int hw)
 		return 0;
 
 	for_each_online_node(nid) {
-		work_with_active_regions(nid, si_domain_work_fn, &ret);
-		if (ret)
-			return ret;
+		unsigned long start_pfn, end_pfn;
+		int i;
+
+		for_each_mem_pfn_range(i, nid, &start_pfn, &end_pfn, NULL) {
+			ret = iommu_domain_identity_map(si_domain,
+					PFN_PHYS(start_pfn), PFN_PHYS(end_pfn));
+			if (ret)
+				return ret;
+		}
 	}
 
 	return 0;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index c70a326b8f26..57e4c9ffdff8 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1327,9 +1327,27 @@ int add_from_early_node_map(struct range *range, int az,
 				 int nr_range, int nid);
 u64 __init find_memory_core_early(int nid, u64 size, u64 align,
 					u64 goal, u64 limit);
-typedef int (*work_fn_t)(unsigned long, unsigned long, void *);
-extern void work_with_active_regions(int nid, work_fn_t work_fn, void *data);
 extern void sparse_memory_present_with_active_regions(int nid);
+
+extern void __next_mem_pfn_range(int *idx, int nid,
+				 unsigned long *out_start_pfn,
+				 unsigned long *out_end_pfn, int *out_nid);
+
+/**
+ * for_each_mem_pfn_range - early memory pfn range iterator
+ * @i: an integer used as loop variable
+ * @nid: node selector, %MAX_NUMNODES for all nodes
+ * @p_start: ptr to ulong for start pfn of the range, can be %NULL
+ * @p_end: ptr to ulong for end pfn of the range, can be %NULL
+ * @p_nid: ptr to int for nid of the range, can be %NULL
+ *
+ * Walks over configured memory ranges.  Available after early_node_map is
+ * populated.
+ */
+#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid)		\
+	for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
+	     i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))
+
 #endif /* CONFIG_ARCH_POPULATES_NODE_MAP */
 
 #if !defined(CONFIG_ARCH_POPULATES_NODE_MAP) && \
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index c7f0e5be4a31..69fffabf61b7 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -3903,18 +3903,6 @@ int __init add_from_early_node_map(struct range *range, int az,
 	return nr_range;
 }
 
-void __init work_with_active_regions(int nid, work_fn_t work_fn, void *data)
-{
-	int i;
-	int ret;
-
-	for_each_active_range_index_in_nid(i, nid) {
-		ret = work_fn(early_node_map[i].start_pfn,
-			      early_node_map[i].end_pfn, data);
-		if (ret)
-			break;
-	}
-}
 /**
  * sparse_memory_present_with_active_regions - Call memory_present for each active range
  * @nid: The node to call memory_present for. If MAX_NUMNODES, all nodes will be used.
@@ -4421,6 +4409,34 @@ static inline void setup_nr_node_ids(void)
 }
 #endif
 
+/*
+ * Common iterator interface used to define for_each_mem_pfn_range().
+ */
+void __meminit __next_mem_pfn_range(int *idx, int nid,
+				    unsigned long *out_start_pfn,
+				    unsigned long *out_end_pfn, int *out_nid)
+{
+	struct node_active_region *r = NULL;
+
+	while (++*idx < nr_nodemap_entries) {
+		if (nid == MAX_NUMNODES || nid == early_node_map[*idx].nid) {
+			r = &early_node_map[*idx];
+			break;
+		}
+	}
+	if (!r) {
+		*idx = -1;
+		return;
+	}
+
+	if (out_start_pfn)
+		*out_start_pfn = r->start_pfn;
+	if (out_end_pfn)
+		*out_end_pfn = r->end_pfn;
+	if (out_nid)
+		*out_nid = r->nid;
+}
+
 /**
  * add_active_range - Register a range of PFNs backed by physical memory
  * @nid: The node ID the range resides on
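
To see the iterator's control flow in isolation: __next_mem_pfn_range()
treats *idx as a cursor, starting from -1 and resetting it to -1 once the
map is exhausted, which is what makes the macro's i >= 0 test terminate the
loop.  Below is a hypothetical, self-contained userspace mock; the fake
early_node_map contents and PFN values are invented for illustration, and
only the cursor logic mirrors the patch:

	#include <stdio.h>

	#define MAX_NUMNODES 4

	struct node_active_region {
		unsigned long start_pfn;
		unsigned long end_pfn;
		int nid;
	};

	/* A fake early_node_map, for demonstration only. */
	static struct node_active_region early_node_map[] = {
		{ 0x000, 0x100, 0 },
		{ 0x100, 0x180, 1 },
		{ 0x200, 0x300, 1 },
	};
	static int nr_nodemap_entries = 3;

	/* Same cursor logic as the kernel's __next_mem_pfn_range():
	 * advance *idx to the next entry matching nid, or reset it to -1
	 * to signal that the walk is done. */
	static void __next_mem_pfn_range(int *idx, int nid,
					 unsigned long *out_start_pfn,
					 unsigned long *out_end_pfn, int *out_nid)
	{
		struct node_active_region *r = NULL;

		while (++*idx < nr_nodemap_entries) {
			if (nid == MAX_NUMNODES || nid == early_node_map[*idx].nid) {
				r = &early_node_map[*idx];
				break;
			}
		}
		if (!r) {
			*idx = -1;
			return;
		}
		if (out_start_pfn)
			*out_start_pfn = r->start_pfn;
		if (out_end_pfn)
			*out_end_pfn = r->end_pfn;
		if (out_nid)
			*out_nid = r->nid;
	}

	#define for_each_mem_pfn_range(i, nid, p_start, p_end, p_nid)		\
		for (i = -1, __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid); \
		     i >= 0; __next_mem_pfn_range(&i, nid, p_start, p_end, p_nid))

	int main(void)
	{
		unsigned long start, end;
		int i, nid;

		/* Walk all nodes, as get_node_active_region() does with
		 * MAX_NUMNODES; prints the three fake ranges above. */
		for_each_mem_pfn_range(i, MAX_NUMNODES, &start, &end, &nid)
			printf("node %d: pfn [%#lx, %#lx)\n", nid, start, end);
		return 0;
	}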