author	Heiko Carstens <hca@linux.ibm.com>	2023-10-24 10:15:19 +0200
committer	Greg Kroah-Hartman <gregkh@linuxfoundation.org>	2023-11-28 17:20:05 +0000
commit	7453e81061a492085f00e9d3bcb26a95217577d9 (patch)
tree	629ca1af6619faa498c3a1048c8ae49407875a32 /arch
parent	bfabe8d0c1c106f6567f5688f29fd4cee1675d6c (diff)
s390/cmma: fix detection of DAT pages
commit 44d93045247661acbd50b1629e62f415f2747577 upstream.

If the cmma no-dat feature is available the kernel page tables are walked to identify and mark all pages which are used for address translation (all region, segment, and page tables). In a subsequent loop all other pages are marked as "no-dat" pages with the ESSA instruction. This information is visible to the hypervisor, so that the hypervisor can optimize purging of guest TLB entries.

The initial loop however is incorrect: only the first three of the four pages which belong to segment and region tables will be marked as being used for DAT. The last page is incorrectly marked as no-dat. This can result in incorrect guest TLB flushes.

Fix this by simply marking all four pages.

Cc: <stable@vger.kernel.org>
Reviewed-by: Claudio Imbrenda <imbrenda@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
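For background: on s390 a region or segment table has 2048 entries of 8 bytes, i.e. 16 KB, which is why it is backed by four 4 KB pages and why the loop bound in the hunks below has to be 4. The following is a minimal, self-contained user-space sketch of the marking pattern this patch fixes; struct page_stub, ARCH_1_FLAG, and mark_dat_table() are hypothetical stand-ins for the kernel's struct page, PG_arch_1, and the open-coded loops in page-states.c, not kernel API.

#include <assert.h>
#include <stdio.h>

#define CRST_TABLE_ENTRIES	2048	/* entries per region/segment table */
#define CRST_ENTRY_SIZE		8	/* bytes per entry */
#define PAGE_SIZE		4096
/* 2048 * 8 = 16384 bytes -> four pages back one region/segment table */
#define CRST_TABLE_PAGES	(CRST_TABLE_ENTRIES * CRST_ENTRY_SIZE / PAGE_SIZE)

/* Hypothetical stand-ins for the kernel's struct page and PG_arch_1. */
struct page_stub {
	unsigned long flags;
};
#define ARCH_1_FLAG	(1UL << 0)

/* Mark every page backing a region/segment table as used for DAT. */
static void mark_dat_table(struct page_stub *page)
{
	int i;

	for (i = 0; i < CRST_TABLE_PAGES; i++)	/* the buggy loop stopped at 3 */
		page[i].flags |= ARCH_1_FLAG;
}

int main(void)
{
	struct page_stub table[CRST_TABLE_PAGES] = { 0 };
	int i;

	mark_dat_table(table);
	for (i = 0; i < CRST_TABLE_PAGES; i++)
		assert(table[i].flags & ARCH_1_FLAG);	/* would fail for i == 3 with the old bound */
	printf("all %d pages marked as DAT\n", CRST_TABLE_PAGES);
	return 0;
}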
Diffstat (limited to 'arch')
-rw-r--r--	arch/s390/mm/page-states.c	6
1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/arch/s390/mm/page-states.c b/arch/s390/mm/page-states.c
index 1e2ea706aa22..00e7b0876dc5 100644
--- a/arch/s390/mm/page-states.c
+++ b/arch/s390/mm/page-states.c
@@ -121,7 +121,7 @@ static void mark_kernel_pud(p4d_t *p4d, unsigned long addr, unsigned long end)
 			continue;
 		if (!pud_folded(*pud)) {
 			page = phys_to_page(pud_val(*pud));
-			for (i = 0; i < 3; i++)
+			for (i = 0; i < 4; i++)
 				set_bit(PG_arch_1, &page[i].flags);
 		}
 		mark_kernel_pmd(pud, addr, next);
@@ -142,7 +142,7 @@ static void mark_kernel_p4d(pgd_t *pgd, unsigned long addr, unsigned long end)
 			continue;
 		if (!p4d_folded(*p4d)) {
 			page = phys_to_page(p4d_val(*p4d));
-			for (i = 0; i < 3; i++)
+			for (i = 0; i < 4; i++)
 				set_bit(PG_arch_1, &page[i].flags);
 		}
 		mark_kernel_pud(p4d, addr, next);
@@ -164,7 +164,7 @@ static void mark_kernel_pgd(void)
 			continue;
 		if (!pgd_folded(*pgd)) {
 			page = phys_to_page(pgd_val(*pgd));
-			for (i = 0; i < 3; i++)
+			for (i = 0; i < 4; i++)
 				set_bit(PG_arch_1, &page[i].flags);
 		}
 		mark_kernel_p4d(pgd, addr, next);