author		Benjamin Berg <benjamin.berg@intel.com>	2024-07-03 15:45:36 +0200
committer	Johannes Berg <johannes.berg@intel.com>	2024-07-03 17:09:50 +0200
commit		bcf3d957c63d8b6d718b862fea18c5f14ce803e2 (patch)
tree		607887b1ddcbee5522d4066df731ba739bbaa26a /arch/um/kernel
parent		573a446fc8ea4ca9be60b1db2091297da48d0a0d (diff)
um: refactor TLB update handling
Conceptually, we want the memory mappings to always be up to date and
represent whatever is in the TLB. To ensure that, we need to sync them
over in the userspace case and for the kernel we need to process the
mappings.

The kernel will call flush_tlb_* if page table entries that were valid
before become invalid. Unfortunately, this is not the case if entries
are added.

As such, change both flush_tlb_* and set_ptes to track the memory range
that has to be synchronized. For the kernel, we need to execute a
flush_tlb_kern_* immediately but we can wait for the first page fault in
case of set_ptes. For userspace in contrast we only store that a range
of memory needs to be synced and do so whenever we switch to that
process.

Signed-off-by: Benjamin Berg <benjamin.berg@intel.com>
Link: https://patch.msgid.link/20240703134536.1161108-13-benjamin@sipsolutions.net
Signed-off-by: Johannes Berg <johannes.berg@intel.com>
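The tracking described in the message boils down to keeping one pending [from, to) window per mm context, via the sync_tlb_range_from/sync_tlb_range_to fields used in the tlb.c hunk below. The helper that grows that window is defined in a header outside arch/um/kernel and is not part of this diff; a minimal sketch of the idea, using only the field names visible below:

/*
 * Sketch only: record that [start, end) needs to be synced to the host,
 * merging it into any range already pending for this mm.
 */
static inline void um_tlb_mark_sync(struct mm_struct *mm, unsigned long start,
				    unsigned long end)
{
	if (!mm->context.sync_tlb_range_to) {
		/* Nothing pending yet: start a new range. */
		mm->context.sync_tlb_range_from = start;
		mm->context.sync_tlb_range_to = end;
	} else {
		/* Grow the pending range to cover the new span. */
		if (start < mm->context.sync_tlb_range_from)
			mm->context.sync_tlb_range_from = start;
		if (end > mm->context.sync_tlb_range_to)
			mm->context.sync_tlb_range_to = end;
	}
}

Note that sync_tlb_range_to == 0 doubles as the "nothing pending" marker, which is why um_tlb_sync() in the tlb.c hunk checks it early and clears both fields once the range has been processed.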
Diffstat (limited to 'arch/um/kernel')
-rw-r--r--  arch/um/kernel/skas/process.c  |  10
-rw-r--r--  arch/um/kernel/tlb.c           | 130
-rw-r--r--  arch/um/kernel/trap.c          |  15
3 files changed, 34 insertions, 121 deletions
diff --git a/arch/um/kernel/skas/process.c b/arch/um/kernel/skas/process.c
index 41901a74fccc..5f9c1c5f36e2 100644
--- a/arch/um/kernel/skas/process.c
+++ b/arch/um/kernel/skas/process.c
@@ -8,6 +8,8 @@
#include <linux/sched/task_stack.h>
#include <linux/sched/task.h>
+#include <asm/tlbflush.h>
+
#include <as-layout.h>
#include <kern.h>
#include <os.h>
@@ -58,3 +60,11 @@ struct mm_id *current_mm_id(void)
return &current->mm->context.id;
}
+
+void current_mm_sync(void)
+{
+ if (current->mm == NULL)
+ return;
+
+ um_tlb_sync(current->mm);
+}
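current_mm_sync() added above gives the switch-to-userspace path a single entry point for flushing whatever range was recorded against the current process. The caller sits outside arch/um/kernel and is not shown in this diff; a hypothetical call site could look like:

/*
 * Illustrative sketch only: before letting a userspace process run again,
 * push any recorded-but-unsynced page table changes into its host mappings.
 */
static void sketch_resume_userspace(void)
{
	current_mm_sync();	/* no-op for kernel threads (current->mm == NULL) */

	/* ... return to userspace for this process ... */
}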
diff --git a/arch/um/kernel/tlb.c b/arch/um/kernel/tlb.c
index 2ba1865679ae..44c6fc697f3a 100644
--- a/arch/um/kernel/tlb.c
+++ b/arch/um/kernel/tlb.c
@@ -170,14 +170,16 @@ static inline int update_p4d_range(pgd_t *pgd, unsigned long addr,
return ret;
}
-static int fix_range_common(struct mm_struct *mm, unsigned long start_addr,
- unsigned long end_addr)
+int um_tlb_sync(struct mm_struct *mm)
{
pgd_t *pgd;
struct vm_ops ops;
- unsigned long addr = start_addr, next;
+ unsigned long addr = mm->context.sync_tlb_range_from, next;
int ret = 0;
+ if (mm->context.sync_tlb_range_to == 0)
+ return 0;
+
ops.mm_idp = &mm->context.id;
if (mm == &init_mm) {
ops.mmap = kern_map;
@@ -191,7 +193,7 @@ static int fix_range_common(struct mm_struct *mm, unsigned long start_addr,
pgd = pgd_offset(mm, addr);
do {
- next = pgd_addr_end(addr, end_addr);
+ next = pgd_addr_end(addr, mm->context.sync_tlb_range_to);
if (!pgd_present(*pgd)) {
if (pgd_newpage(*pgd)) {
ret = ops.unmap(ops.mm_idp, addr,
@@ -200,87 +202,16 @@ static int fix_range_common(struct mm_struct *mm, unsigned long start_addr,
}
} else
ret = update_p4d_range(pgd, addr, next, &ops);
- } while (pgd++, addr = next, ((addr < end_addr) && !ret));
+ } while (pgd++, addr = next,
+ ((addr < mm->context.sync_tlb_range_to) && !ret));
if (ret == -ENOMEM)
report_enomem();
- return ret;
-}
-
-static void flush_tlb_kernel_range_common(unsigned long start, unsigned long end)
-{
- int err;
-
- err = fix_range_common(&init_mm, start, end);
-
- if (err)
- panic("flush_tlb_kernel failed, errno = %d\n", err);
-}
-
-void flush_tlb_page(struct vm_area_struct *vma, unsigned long address)
-{
- pgd_t *pgd;
- p4d_t *p4d;
- pud_t *pud;
- pmd_t *pmd;
- pte_t *pte;
- struct mm_struct *mm = vma->vm_mm;
- int r, w, x, prot;
- struct mm_id *mm_id;
-
- address &= PAGE_MASK;
-
- pgd = pgd_offset(mm, address);
- if (!pgd_present(*pgd))
- goto kill;
-
- p4d = p4d_offset(pgd, address);
- if (!p4d_present(*p4d))
- goto kill;
-
- pud = pud_offset(p4d, address);
- if (!pud_present(*pud))
- goto kill;
-
- pmd = pmd_offset(pud, address);
- if (!pmd_present(*pmd))
- goto kill;
-
- pte = pte_offset_kernel(pmd, address);
-
- r = pte_read(*pte);
- w = pte_write(*pte);
- x = pte_exec(*pte);
- if (!pte_young(*pte)) {
- r = 0;
- w = 0;
- } else if (!pte_dirty(*pte)) {
- w = 0;
- }
-
- mm_id = &mm->context.id;
- prot = ((r ? UM_PROT_READ : 0) | (w ? UM_PROT_WRITE : 0) |
- (x ? UM_PROT_EXEC : 0));
- if (pte_newpage(*pte)) {
- if (pte_present(*pte)) {
- unsigned long long offset;
- int fd;
-
- fd = phys_mapping(pte_val(*pte) & PAGE_MASK, &offset);
- map(mm_id, address, PAGE_SIZE, prot, fd, offset);
- } else
- unmap(mm_id, address, PAGE_SIZE);
- } else if (pte_newprot(*pte))
- protect(mm_id, address, PAGE_SIZE, prot);
-
- *pte = pte_mkuptodate(*pte);
+ mm->context.sync_tlb_range_from = 0;
+ mm->context.sync_tlb_range_to = 0;
- return;
-
-kill:
- printk(KERN_ERR "Failed to flush page for address 0x%lx\n", address);
- force_sig(SIGKILL);
+ return ret;
}
void flush_tlb_all(void)
@@ -295,48 +226,11 @@ void flush_tlb_all(void)
flush_tlb_mm(current->mm);
}
-void flush_tlb_kernel_range(unsigned long start, unsigned long end)
-{
- flush_tlb_kernel_range_common(start, end);
-}
-
-void flush_tlb_kernel_vm(void)
-{
- flush_tlb_kernel_range_common(start_vm, end_vm);
-}
-
-void __flush_tlb_one(unsigned long addr)
-{
- flush_tlb_kernel_range_common(addr, addr + PAGE_SIZE);
-}
-
-static void fix_range(struct mm_struct *mm, unsigned long start_addr,
- unsigned long end_addr)
-{
- /*
- * Don't bother flushing if this address space is about to be
- * destroyed.
- */
- if (atomic_read(&mm->mm_users) == 0)
- return;
-
- fix_range_common(mm, start_addr, end_addr);
-}
-
-void flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
- unsigned long end)
-{
- if (vma->vm_mm == NULL)
- flush_tlb_kernel_range_common(start, end);
- else fix_range(vma->vm_mm, start, end);
-}
-EXPORT_SYMBOL(flush_tlb_range);
-
void flush_tlb_mm(struct mm_struct *mm)
{
struct vm_area_struct *vma;
VMA_ITERATOR(vmi, mm, 0);
for_each_vma(vmi, vma)
- fix_range(mm, vma->vm_start, vma->vm_end);
+ um_tlb_mark_sync(mm, vma->vm_start, vma->vm_end);
}
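With the per-page and per-range helpers removed from tlb.c, the remaining flush_tlb_* hooks can become thin wrappers around the range tracker. The real wrappers live in a header outside this directory and are not part of this diff; a sketch of the shape they take, following the commit message's rule that kernel flushes must be processed immediately while userspace flushes may be deferred:

/* Sketch only: a user-range flush just records the range for later syncing. */
static inline void sketch_flush_tlb_page(struct vm_area_struct *vma,
					 unsigned long address)
{
	um_tlb_mark_sync(vma->vm_mm, address & PAGE_MASK,
			 (address & PAGE_MASK) + PAGE_SIZE);
}

/* Sketch only: a kernel-range flush records the range and syncs right away. */
static inline void sketch_flush_tlb_kernel_range(unsigned long start,
						 unsigned long end)
{
	um_tlb_mark_sync(&init_mm, start, end);
	um_tlb_sync(&init_mm);
}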
diff --git a/arch/um/kernel/trap.c b/arch/um/kernel/trap.c
index 6d8ae86ae978..97c8df9c4401 100644
--- a/arch/um/kernel/trap.c
+++ b/arch/um/kernel/trap.c
@@ -113,7 +113,7 @@ good_area:
#if 0
WARN_ON(!pte_young(*pte) || (is_write && !pte_dirty(*pte)));
#endif
- flush_tlb_page(vma, address);
+
out:
mmap_read_unlock(mm);
out_nosemaphore:
@@ -210,8 +210,17 @@ unsigned long segv(struct faultinfo fi, unsigned long ip, int is_user,
if (!is_user && regs)
current->thread.segv_regs = container_of(regs, struct pt_regs, regs);
- if (!is_user && (address >= start_vm) && (address < end_vm)) {
- flush_tlb_kernel_vm();
+ if (!is_user && init_mm.context.sync_tlb_range_to) {
+ /*
+ * Kernel has pending updates from set_ptes that were not
+ * flushed yet. Syncing them should fix the pagefault (if not
+ * we'll get here again and panic).
+ */
+ err = um_tlb_sync(&init_mm);
+ if (err == -ENOMEM)
+ report_enomem();
+ if (err)
+ panic("Failed to sync kernel TLBs: %d", err);
goto out;
}
else if (current->mm == NULL) {
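The trap.c hunk above is the other half of the deferred kernel path: set_ptes only records that a kernel range changed, and the first fault in that range triggers the actual host-side sync via um_tlb_sync(&init_mm). A sketch of the recording side (the real set_ptes lives in the arch's page table header, outside this diff), roughly mirroring the generic set_ptes loop:

/*
 * Sketch only: install the PTEs and record the affected range; the sync is
 * deferred until um_tlb_sync() runs (for the kernel, from the fault path).
 */
static inline void sketch_set_ptes(struct mm_struct *mm, unsigned long addr,
				   pte_t *ptep, pte_t pte, int nr)
{
	um_tlb_mark_sync(mm, addr, addr + nr * PAGE_SIZE);

	for (;;) {
		set_pte(ptep, pte);
		if (--nr == 0)
			break;
		ptep++;
		pte = __pte(pte_val(pte) + (1UL << PFN_PTE_SHIFT));
	}
}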