Diffstat (limited to 'mm/vmalloc.c')
-rw-r--r--  mm/vmalloc.c  113
1 file changed, 94 insertions(+), 19 deletions(-)
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index e86ba6e74b50..e5e9e1fcac01 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -18,6 +18,7 @@
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
+#include <linux/set_memory.h>
#include <linux/debugobjects.h>
#include <linux/kallsyms.h>
#include <linux/list.h>
@@ -1059,24 +1060,9 @@ static void vb_free(const void *addr, unsigned long size)
spin_unlock(&vb->lock);
}
-/**
- * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
- *
- * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
- * to amortize TLB flushing overheads. What this means is that any page you
- * have now, may, in a former life, have been mapped into kernel virtual
- * address by the vmap layer and so there might be some CPUs with TLB entries
- * still referencing that page (additional to the regular 1:1 kernel mapping).
- *
- * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
- * be sure that none of the pages we have control over will have any aliases
- * from the vmap layer.
- */
-void vm_unmap_aliases(void)
+static void _vm_unmap_aliases(unsigned long start, unsigned long end, int flush)
{
- unsigned long start = ULONG_MAX, end = 0;
int cpu;
- int flush = 0;
if (unlikely(!vmap_initialized))
return;
@@ -1113,6 +1099,27 @@ void vm_unmap_aliases(void)
flush_tlb_kernel_range(start, end);
mutex_unlock(&vmap_purge_lock);
}
+
+/**
+ * vm_unmap_aliases - unmap outstanding lazy aliases in the vmap layer
+ *
+ * The vmap/vmalloc layer lazily flushes kernel virtual mappings primarily
+ * to amortize TLB flushing overheads. What this means is that any page you
+ * have now, may, in a former life, have been mapped into kernel virtual
+ * address by the vmap layer and so there might be some CPUs with TLB entries
+ * still referencing that page (additional to the regular 1:1 kernel mapping).
+ *
+ * vm_unmap_aliases flushes all such lazy mappings. After it returns, we can
+ * be sure that none of the pages we have control over will have any aliases
+ * from the vmap layer.
+ */
+void vm_unmap_aliases(void)
+{
+ unsigned long start = ULONG_MAX, end = 0;
+ int flush = 0;
+
+ _vm_unmap_aliases(start, end, flush);
+}
EXPORT_SYMBOL_GPL(vm_unmap_aliases);
/**
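For context on the kerneldoc above: a caller that is about to tighten the permissions of pages it owns flushes lazy vmap aliases first, so that no CPU keeps a stale TLB entry for an old kernel virtual mapping of those pages. A minimal hypothetical sketch, not part of this patch, using only the stock vm_unmap_aliases(), page_address() and set_memory_ro() APIs:

#include <linux/mm.h>
#include <linux/set_memory.h>
#include <linux/vmalloc.h>

/* Hypothetical illustration, not from this patch. */
static void example_lock_down_page(struct page *page)
{
	/* Drop any outstanding lazy kernel-virtual aliases. */
	vm_unmap_aliases();

	/* Only the linear (1:1) mapping remains; make it read-only. */
	set_memory_ro((unsigned long)page_address(page), 1);
}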
@@ -1505,6 +1512,72 @@ struct vm_struct *remove_vm_area(const void *addr)
return NULL;
}
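+/* Apply @set_direct_map to the direct map alias of each page in @area. */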
+static inline void set_area_direct_map(const struct vm_struct *area,
+ int (*set_direct_map)(struct page *page))
+{
+ int i;
+
+ for (i = 0; i < area->nr_pages; i++)
+ if (page_address(area->pages[i]))
+ set_direct_map(area->pages[i]);
+}
+
+/* Handle removing and resetting vm mappings related to the vm_struct. */
+static void vm_remove_mappings(struct vm_struct *area, int deallocate_pages)
+{
+ unsigned long addr = (unsigned long)area->addr;
+ unsigned long start = ULONG_MAX, end = 0;
+ int flush_reset = area->flags & VM_FLUSH_RESET_PERMS;
+ int i;
+
+ /*
+ * The below block can be removed when all architectures that have
+ * direct map permissions also have set_direct_map_*() implementations.
+ * This is concerned with resetting the direct map of any vm alias with
+ * execute permissions, without leaving an RW+X window.
+ */
+ if (flush_reset && !IS_ENABLED(CONFIG_ARCH_HAS_SET_DIRECT_MAP)) {
+ set_memory_nx(addr, area->nr_pages);
+ set_memory_rw(addr, area->nr_pages);
+ }
+
+ remove_vm_area(area->addr);
+
+ /* If this is not VM_FLUSH_RESET_PERMS memory, no need for the below. */
+ if (!flush_reset)
+ return;
+
+ /*
+ * If not deallocating pages, just do the flush of the VM area and
+ * return.
+ */
+ if (!deallocate_pages) {
+ vm_unmap_aliases();
+ return;
+ }
+
+ /*
+ * If execution gets here, flush the vm mapping and reset the direct
+ * map. Find the start and end range of the direct mappings to make sure
+ * the vm_unmap_aliases() flush includes the direct map.
+ */
+ for (i = 0; i < area->nr_pages; i++) {
+ if (page_address(area->pages[i])) {
+ addr = (unsigned long)page_address(area->pages[i]);
+ start = min(addr, start);
+ end = max(addr + PAGE_SIZE, end);
+ }
+ }
+
+ /*
+ * Set direct map to something invalid so that it won't be cached if
+ * there are any accesses after the TLB flush, then flush the TLB and
+ * reset the direct map permissions to the default.
+ */
+ set_area_direct_map(area, set_direct_map_invalid_noflush);
+ _vm_unmap_aliases(start, end, 1);
+ set_area_direct_map(area, set_direct_map_default_noflush);
+}
+
static void __vunmap(const void *addr, int deallocate_pages)
{
struct vm_struct *area;
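The unconditional set_area_direct_map() calls above are safe even when CONFIG_ARCH_HAS_SET_DIRECT_MAP is not selected, because the set_direct_map_*_noflush() helpers are then expected to degrade to no-ops and vm_remove_mappings() relies on the set_memory_nx()/set_memory_rw() fallback instead. A rough sketch of such stubs, given here as an assumption for illustration rather than a quote from <linux/set_memory.h>:

#include <linux/mm_types.h>

#ifndef CONFIG_ARCH_HAS_SET_DIRECT_MAP
/* Illustrative no-op stubs (assumption): the direct map is left untouched. */
static inline int set_direct_map_invalid_noflush(struct page *page)
{
	return 0;
}

static inline int set_direct_map_default_noflush(struct page *page)
{
	return 0;
}
#endif /* CONFIG_ARCH_HAS_SET_DIRECT_MAP */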
@@ -1526,7 +1599,8 @@ static void __vunmap(const void *addr, int deallocate_pages)
debug_check_no_locks_freed(area->addr, get_vm_area_size(area));
debug_check_no_obj_freed(area->addr, get_vm_area_size(area));
- remove_vm_area(addr);
+ vm_remove_mappings(area, deallocate_pages);
+
if (deallocate_pages) {
int i;
@@ -1961,8 +2035,9 @@ EXPORT_SYMBOL(vzalloc_node);
*/
void *vmalloc_exec(unsigned long size)
{
- return __vmalloc_node(size, 1, GFP_KERNEL, PAGE_KERNEL_EXEC,
- NUMA_NO_NODE, __builtin_return_address(0));
+ return __vmalloc_node_range(size, 1, VMALLOC_START, VMALLOC_END,
+ GFP_KERNEL, PAGE_KERNEL_EXEC, VM_FLUSH_RESET_PERMS,
+ NUMA_NO_NODE, __builtin_return_address(0));
}
#if defined(CONFIG_64BIT) && defined(CONFIG_ZONE_DMA32)
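With VM_FLUSH_RESET_PERMS now set by vmalloc_exec(), a later vfree() resets permissions on the vmalloc alias (and the direct map, where supported) and flushes the TLB before the pages are returned to the allocator, so no stale W+X alias survives. A hypothetical caller, sketched only for illustration (example_copy_code() and its parameters are made up; vmalloc_exec(), set_memory_ro() and vfree() are the stock APIs):

#include <linux/mm.h>
#include <linux/set_memory.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

/* Hypothetical usage sketch, not part of this patch. */
static void *example_copy_code(const void *code, size_t len)
{
	void *buf = vmalloc_exec(len);

	if (!buf)
		return NULL;

	memcpy(buf, code, len);

	/* The mapping is PAGE_KERNEL_EXEC; drop write permission before use. */
	set_memory_ro((unsigned long)buf, PAGE_ALIGN(len) >> PAGE_SHIFT);

	return buf;	/* eventually released with vfree(buf) */
}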