Diffstat (limited to 'mm/memremap.c')
-rw-r--r--  mm/memremap.c  77
1 file changed, 39 insertions(+), 38 deletions(-)
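
This patch converts the 'res' member of struct dev_pagemap from a struct resource to a struct range, replacing resource_size() with range_len() throughout. For reference, struct range and its length helper are defined in include/linux/range.h along these lines (a paraphrase for orientation, not the verbatim header):

struct range {
	u64 start;
	u64 end;	/* inclusive bound, hence the +1 in the length */
};

static inline u64 range_len(const struct range *range)
{
	return range->end - range->start + 1;
}

Because resource_size() and range_len() both compute end - start + 1 over inclusive bounds, the substitution is mechanical and the PFN arithmetic in the hunks below is unchanged.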
diff --git a/mm/memremap.c b/mm/memremap.c
index 006dace60b1a..d958d348b3ca 100644
--- a/mm/memremap.c
+++ b/mm/memremap.c
@@ -70,24 +70,24 @@ static void devmap_managed_enable_put(void)
}
#endif /* CONFIG_DEV_PAGEMAP_OPS */
-static void pgmap_array_delete(struct resource *res)
+static void pgmap_array_delete(struct range *range)
{
- xa_store_range(&pgmap_array, PHYS_PFN(res->start), PHYS_PFN(res->end),
+ xa_store_range(&pgmap_array, PHYS_PFN(range->start), PHYS_PFN(range->end),
NULL, GFP_KERNEL);
synchronize_rcu();
}
static unsigned long pfn_first(struct dev_pagemap *pgmap)
{
- return PHYS_PFN(pgmap->res.start) +
+ return PHYS_PFN(pgmap->range.start) +
vmem_altmap_offset(pgmap_altmap(pgmap));
}
static unsigned long pfn_end(struct dev_pagemap *pgmap)
{
- const struct resource *res = &pgmap->res;
+ const struct range *range = &pgmap->range;
- return (res->start + resource_size(res)) >> PAGE_SHIFT;
+ return (range->start + range_len(range)) >> PAGE_SHIFT;
}
static unsigned long pfn_next(unsigned long pfn)
@@ -126,7 +126,7 @@ static void dev_pagemap_cleanup(struct dev_pagemap *pgmap)
void memunmap_pages(struct dev_pagemap *pgmap)
{
- struct resource *res = &pgmap->res;
+ struct range *range = &pgmap->range;
struct page *first_page;
unsigned long pfn;
int nid;
@@ -143,20 +143,20 @@ void memunmap_pages(struct dev_pagemap *pgmap)
nid = page_to_nid(first_page);
mem_hotplug_begin();
- remove_pfn_range_from_zone(page_zone(first_page), PHYS_PFN(res->start),
- PHYS_PFN(resource_size(res)));
+ remove_pfn_range_from_zone(page_zone(first_page), PHYS_PFN(range->start),
+ PHYS_PFN(range_len(range)));
if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
- __remove_pages(PHYS_PFN(res->start),
- PHYS_PFN(resource_size(res)), NULL);
+ __remove_pages(PHYS_PFN(range->start),
+ PHYS_PFN(range_len(range)), NULL);
} else {
- arch_remove_memory(nid, res->start, resource_size(res),
+ arch_remove_memory(nid, range->start, range_len(range),
pgmap_altmap(pgmap));
- kasan_remove_zero_shadow(__va(res->start), resource_size(res));
+ kasan_remove_zero_shadow(__va(range->start), range_len(range));
}
mem_hotplug_done();
- untrack_pfn(NULL, PHYS_PFN(res->start), resource_size(res));
- pgmap_array_delete(res);
+ untrack_pfn(NULL, PHYS_PFN(range->start), range_len(range));
+ pgmap_array_delete(range);
WARN_ONCE(pgmap->altmap.alloc, "failed to free all reserved pages\n");
devmap_managed_enable_put();
}
@@ -182,7 +182,7 @@ static void dev_pagemap_percpu_release(struct percpu_ref *ref)
*/
void *memremap_pages(struct dev_pagemap *pgmap, int nid)
{
- struct resource *res = &pgmap->res;
+ struct range *range = &pgmap->range;
struct dev_pagemap *conflict_pgmap;
struct mhp_params params = {
/*
@@ -251,7 +251,7 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid)
return ERR_PTR(error);
}
- conflict_pgmap = get_dev_pagemap(PHYS_PFN(res->start), NULL);
+ conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->start), NULL);
if (conflict_pgmap) {
WARN(1, "Conflicting mapping in same section\n");
put_dev_pagemap(conflict_pgmap);
@@ -259,7 +259,7 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid)
goto err_array;
}
- conflict_pgmap = get_dev_pagemap(PHYS_PFN(res->end), NULL);
+ conflict_pgmap = get_dev_pagemap(PHYS_PFN(range->end), NULL);
if (conflict_pgmap) {
WARN(1, "Conflicting mapping in same section\n");
put_dev_pagemap(conflict_pgmap);
@@ -267,26 +267,27 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid)
goto err_array;
}
- is_ram = region_intersects(res->start, resource_size(res),
+ is_ram = region_intersects(range->start, range_len(range),
IORESOURCE_SYSTEM_RAM, IORES_DESC_NONE);
if (is_ram != REGION_DISJOINT) {
- WARN_ONCE(1, "%s attempted on %s region %pr\n", __func__,
- is_ram == REGION_MIXED ? "mixed" : "ram", res);
+ WARN_ONCE(1, "attempted on %s region %#llx-%#llx\n",
+ is_ram == REGION_MIXED ? "mixed" : "ram",
+ range->start, range->end);
error = -ENXIO;
goto err_array;
}
- error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(res->start),
- PHYS_PFN(res->end), pgmap, GFP_KERNEL));
+ error = xa_err(xa_store_range(&pgmap_array, PHYS_PFN(range->start),
+ PHYS_PFN(range->end), pgmap, GFP_KERNEL));
if (error)
goto err_array;
if (nid < 0)
nid = numa_mem_id();
- error = track_pfn_remap(NULL, &params.pgprot, PHYS_PFN(res->start),
- 0, resource_size(res));
+ error = track_pfn_remap(NULL, &params.pgprot, PHYS_PFN(range->start), 0,
+ range_len(range));
if (error)
goto err_pfn_remap;
@@ -304,16 +305,16 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid)
* arch_add_memory().
*/
if (pgmap->type == MEMORY_DEVICE_PRIVATE) {
- error = add_pages(nid, PHYS_PFN(res->start),
- PHYS_PFN(resource_size(res)), &params);
+ error = add_pages(nid, PHYS_PFN(range->start),
+ PHYS_PFN(range_len(range)), &params);
} else {
- error = kasan_add_zero_shadow(__va(res->start), resource_size(res));
+ error = kasan_add_zero_shadow(__va(range->start), range_len(range));
if (error) {
mem_hotplug_done();
goto err_kasan;
}
- error = arch_add_memory(nid, res->start, resource_size(res),
+ error = arch_add_memory(nid, range->start, range_len(range),
&params);
}
@@ -321,8 +322,8 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid)
struct zone *zone;
zone = &NODE_DATA(nid)->node_zones[ZONE_DEVICE];
- move_pfn_range_to_zone(zone, PHYS_PFN(res->start),
- PHYS_PFN(resource_size(res)), params.altmap);
+ move_pfn_range_to_zone(zone, PHYS_PFN(range->start),
+ PHYS_PFN(range_len(range)), params.altmap);
}
mem_hotplug_done();
@@ -334,17 +335,17 @@ void *memremap_pages(struct dev_pagemap *pgmap, int nid)
* to allow us to do the work while not holding the hotplug lock.
*/
memmap_init_zone_device(&NODE_DATA(nid)->node_zones[ZONE_DEVICE],
- PHYS_PFN(res->start),
- PHYS_PFN(resource_size(res)), pgmap);
+ PHYS_PFN(range->start),
+ PHYS_PFN(range_len(range)), pgmap);
percpu_ref_get_many(pgmap->ref, pfn_end(pgmap) - pfn_first(pgmap));
- return __va(res->start);
+ return __va(range->start);
err_add_memory:
- kasan_remove_zero_shadow(__va(res->start), resource_size(res));
+ kasan_remove_zero_shadow(__va(range->start), range_len(range));
err_kasan:
- untrack_pfn(NULL, PHYS_PFN(res->start), resource_size(res));
+ untrack_pfn(NULL, PHYS_PFN(range->start), range_len(range));
err_pfn_remap:
- pgmap_array_delete(res);
+ pgmap_array_delete(range);
err_array:
dev_pagemap_kill(pgmap);
dev_pagemap_cleanup(pgmap);
@@ -369,7 +370,7 @@ EXPORT_SYMBOL_GPL(memremap_pages);
* 'live' on entry and will be killed and reaped at
* devm_memremap_pages_release() time, or if this routine fails.
*
- * 4/ res is expected to be a host memory range that could feasibly be
+ * 4/ range is expected to be a host memory range that could feasibly be
* treated as a "System RAM" range, i.e. not a device mmio range, but
* this is not enforced.
*/
@@ -426,7 +427,7 @@ struct dev_pagemap *get_dev_pagemap(unsigned long pfn,
* In the cached case we're already holding a live reference.
*/
if (pgmap) {
- if (phys >= pgmap->res.start && phys <= pgmap->res.end)
+ if (phys >= pgmap->range.start && phys <= pgmap->range.end)
return pgmap;
put_dev_pagemap(pgmap);
}
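
With the conversion applied, a caller no longer describes the mapping via pgmap->res; it fills pgmap->range with inclusive physical bounds before calling memremap_pages(). A minimal sketch of the new calling convention follows (hypothetical driver code, not part of this patch; example_memremap and its parameters are illustrative):

/* Hypothetical helper: expose a device's physical window as ZONE_DEVICE pages. */
static void *example_memremap(struct dev_pagemap *pgmap,
			      struct resource *res, int nid)
{
	pgmap->range = (struct range) {
		.start = res->start,
		.end = res->end,	/* inclusive, matching struct resource */
	};
	pgmap->type = MEMORY_DEVICE_GENERIC;

	/* Returns __va(range.start) on success, an ERR_PTR() on failure. */
	return memremap_pages(pgmap, nid);
}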