Diffstat (limited to 'lib/alloc_tag.c')
-rw-r--r--  lib/alloc_tag.c | 80
1 file changed, 61 insertions(+), 19 deletions(-)
diff --git a/lib/alloc_tag.c b/lib/alloc_tag.c
index 2414a7ee7ec7..65e706e1bc19 100644
--- a/lib/alloc_tag.c
+++ b/lib/alloc_tag.c
@@ -189,26 +189,44 @@ void pgalloc_tag_split(struct folio *folio, int old_order, int new_order)
}
}
-void pgalloc_tag_copy(struct folio *new, struct folio *old)
+void pgalloc_tag_swap(struct folio *new, struct folio *old)
{
- union pgtag_ref_handle handle;
- union codetag_ref ref;
- struct alloc_tag *tag;
+ union pgtag_ref_handle handle_old, handle_new;
+ union codetag_ref ref_old, ref_new;
+ struct alloc_tag *tag_old, *tag_new;
- tag = pgalloc_tag_get(&old->page);
- if (!tag)
+ if (!mem_alloc_profiling_enabled())
return;
- if (!get_page_tag_ref(&new->page, &ref, &handle))
+ tag_old = pgalloc_tag_get(&old->page);
+ if (!tag_old)
+ return;
+ tag_new = pgalloc_tag_get(&new->page);
+ if (!tag_new)
return;
- /* Clear the old ref to the original allocation tag. */
- clear_page_tag_ref(&old->page);
- /* Decrement the counters of the tag on get_new_folio. */
- alloc_tag_sub(&ref, folio_size(new));
- __alloc_tag_ref_set(&ref, tag);
- update_page_tag_ref(handle, &ref);
- put_page_tag_ref(handle);
+ if (!get_page_tag_ref(&old->page, &ref_old, &handle_old))
+ return;
+ if (!get_page_tag_ref(&new->page, &ref_new, &handle_new)) {
+ put_page_tag_ref(handle_old);
+ return;
+ }
+
+ /*
+ * Clear tag references to avoid debug warning when using
+ * __alloc_tag_ref_set() with non-empty reference.
+ */
+ set_codetag_empty(&ref_old);
+ set_codetag_empty(&ref_new);
+
+ /* swap tags */
+ __alloc_tag_ref_set(&ref_old, tag_new);
+ update_page_tag_ref(handle_old, &ref_old);
+ __alloc_tag_ref_set(&ref_new, tag_old);
+ update_page_tag_ref(handle_new, &ref_new);
+
+ put_page_tag_ref(handle_old);
+ put_page_tag_ref(handle_new);
}
static void shutdown_mem_profiling(bool remove_file)
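Note: the hunk above converts the copy into a swap by taking both page tag references before modifying either one, and releasing the first handle if the second cannot be obtained. The standalone C sketch below only illustrates that acquire-both-or-bail ordering; the types and helpers (tag_handle, try_get_handle, put_handle) are hypothetical stand-ins for the kernel's get_page_tag_ref()/put_page_tag_ref(), not kernel APIs.

/*
 * Hypothetical userspace analogue of the two-handle swap in
 * pgalloc_tag_swap(): acquire both handles, bail out (releasing the
 * one already held) if the second cannot be taken, then update both
 * and drop the handles. Illustration only, not kernel code.
 */
#include <stdbool.h>
#include <stdio.h>

struct tag_handle {		/* stand-in for union pgtag_ref_handle */
	int *slot;		/* location holding the tag reference   */
};

static bool try_get_handle(int *slot, struct tag_handle *h)
{
	if (!slot)
		return false;	/* analogous to get_page_tag_ref() failing */
	h->slot = slot;
	return true;
}

static void put_handle(struct tag_handle *h)
{
	h->slot = NULL;
}

static void swap_tags(int *old_slot, int *new_slot)
{
	struct tag_handle hold, hnew;
	int tag_old, tag_new;

	if (!try_get_handle(old_slot, &hold))
		return;
	if (!try_get_handle(new_slot, &hnew)) {
		put_handle(&hold);	/* release what we already hold */
		return;
	}

	tag_old = *hold.slot;
	tag_new = *hnew.slot;
	*hold.slot = tag_new;		/* swap the two tags */
	*hnew.slot = tag_old;

	put_handle(&hold);
	put_handle(&hnew);
}

int main(void)
{
	int a = 1, b = 2;

	swap_tags(&a, &b);
	printf("a=%d b=%d\n", a, b);	/* prints: a=2 b=1 */
	return 0;
}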
@@ -393,28 +411,52 @@ repeat:
static int vm_module_tags_populate(void)
{
- unsigned long phys_size = vm_module_tags->nr_pages << PAGE_SHIFT;
+ unsigned long phys_end = ALIGN_DOWN(module_tags.start_addr, PAGE_SIZE) +
+ (vm_module_tags->nr_pages << PAGE_SHIFT);
+ unsigned long new_end = module_tags.start_addr + module_tags.size;
- if (phys_size < module_tags.size) {
+ if (phys_end < new_end) {
struct page **next_page = vm_module_tags->pages + vm_module_tags->nr_pages;
- unsigned long addr = module_tags.start_addr + phys_size;
+ unsigned long old_shadow_end = ALIGN(phys_end, MODULE_ALIGN);
+ unsigned long new_shadow_end = ALIGN(new_end, MODULE_ALIGN);
unsigned long more_pages;
unsigned long nr;
- more_pages = ALIGN(module_tags.size - phys_size, PAGE_SIZE) >> PAGE_SHIFT;
+ more_pages = ALIGN(new_end - phys_end, PAGE_SIZE) >> PAGE_SHIFT;
nr = alloc_pages_bulk_array_node(GFP_KERNEL | __GFP_NOWARN,
NUMA_NO_NODE, more_pages, next_page);
if (nr < more_pages ||
- vmap_pages_range(addr, addr + (nr << PAGE_SHIFT), PAGE_KERNEL,
+ vmap_pages_range(phys_end, phys_end + (nr << PAGE_SHIFT), PAGE_KERNEL,
next_page, PAGE_SHIFT) < 0) {
/* Clean up and error out */
for (int i = 0; i < nr; i++)
__free_page(next_page[i]);
return -ENOMEM;
}
+
vm_module_tags->nr_pages += nr;
+
+ /*
+ * Kasan allocates 1 byte of shadow for every 8 bytes of data.
+ * When kasan_alloc_module_shadow allocates shadow memory,
+ * its unit of allocation is a page.
+ * Therefore, here we need to align to MODULE_ALIGN.
+ */
+ if (old_shadow_end < new_shadow_end)
+ kasan_alloc_module_shadow((void *)old_shadow_end,
+ new_shadow_end - old_shadow_end,
+ GFP_KERNEL);
}
+ /*
+ * Mark the pages as accessible, now that they are mapped.
+ * With hardware tag-based KASAN, marking is skipped for
+ * non-VM_ALLOC mappings, see __kasan_unpoison_vmalloc().
+ */
+ kasan_unpoison_vmalloc((void *)module_tags.start_addr,
+ new_end - module_tags.start_addr,
+ KASAN_VMALLOC_PROT_NORMAL);
+
return 0;
}
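Note: the range arithmetic in the second hunk (phys_end, more_pages, and the MODULE_ALIGN-aligned shadow bounds) can be checked with the small standalone program below. The ALIGN/ALIGN_DOWN macros are simplified power-of-two versions of the kernel helpers, and the start address, area size, populated page count, and MODULE_ALIGN value (assumed PAGE_SIZE << 3, i.e. a KASAN shadow scale of 8) are made-up example inputs, not values taken from the kernel.

/*
 * Standalone sketch of the calculation in vm_module_tags_populate():
 * how many more pages are needed to cover the tag area, where to map
 * them, and whether the MODULE_ALIGN-aligned shadow range grows.
 * All inputs are illustrative assumptions.
 */
#include <stdio.h>

#define PAGE_SHIFT	12UL
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define MODULE_ALIGN	(PAGE_SIZE << 3)	/* assumed shadow scale of 8 */

/* power-of-two-only versions of the kernel's ALIGN helpers */
#define ALIGN(x, a)		(((x) + (a) - 1) & ~((a) - 1))
#define ALIGN_DOWN(x, a)	((x) & ~((a) - 1))

int main(void)
{
	/* hypothetical module_tags state */
	unsigned long start_addr = 0xffff800010000234UL; /* deliberately not page aligned */
	unsigned long size       = 5 * PAGE_SIZE + 100;  /* current area size */
	unsigned long nr_pages   = 3;                    /* pages already populated */

	unsigned long phys_end = ALIGN_DOWN(start_addr, PAGE_SIZE) +
				 (nr_pages << PAGE_SHIFT);
	unsigned long new_end  = start_addr + size;

	if (phys_end < new_end) {
		unsigned long old_shadow_end = ALIGN(phys_end, MODULE_ALIGN);
		unsigned long new_shadow_end = ALIGN(new_end, MODULE_ALIGN);
		unsigned long more_pages = ALIGN(new_end - phys_end, PAGE_SIZE) >> PAGE_SHIFT;

		printf("need %lu more page(s), map at 0x%lx\n", more_pages, phys_end);
		if (old_shadow_end < new_shadow_end)
			printf("shadow range grows by 0x%lx bytes\n",
			       new_shadow_end - old_shadow_end);
	}
	return 0;
}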