author	Paul Mackerras <paulus@samba.org>	2008-08-30 11:26:27 +1000
committer	Paul Mackerras <paulus@samba.org>	2008-09-03 20:53:22 +1000
commit	9e88ba4e45ecad2425c4cc4e0861a26f4e36c6da (patch)
tree	bcd48acbd2420eda4d9018d3c0c697cdb84c44f6 /arch/powerpc
parent	78fbc824ed8225edd80cdc57771d5ca4f7aae95e (diff)
powerpc: Only make kernel text pages of linear mapping executable
Commit bc033b63bbfeb6c4b4eb0a1d083c650e4a0d2af8 ("powerpc/mm: Fix
attribute confusion with htab_bolt_mapping()") moved the check for
whether we should make pages of the linear mapping executable from
htab_bolt_mapping into its callers, including htab_initialize.
A side-effect of this is that the decision is now made once for
each contiguous section in the LMB array rather than for each page
individually.  This can often mean that the whole of the linear
mapping ends up being executable.

This reverts to the previous behaviour, where individual pages are
checked for being part of the kernel text or not, by moving the
check back down into htab_bolt_mapping.

Acked-by: Benjamin Herrenschmidt <benh@kernel.crashing.org>
Signed-off-by: Paul Mackerras <paulus@samba.org>
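[Editor's illustration] To make the behaviour difference concrete, here is a
minimal userspace sketch, not part of the patch: it walks a mapping one page
at a time and clears the no-execute bit only for pages inside the kernel
text, mirroring the in_kernel_text()/HPTE_R_N check this commit moves back
into htab_bolt_mapping(). The text bounds, the HPTE_R_N bit value, and the
mapped range are invented for illustration, and a 64-bit unsigned long is
assumed.

/*
 * Illustrative sketch only -- models the per-page executable decision.
 * HPTE_R_N's value and the kernel-text bounds are assumed here, not
 * taken from the kernel headers.
 */
#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define HPTE_R_N	0x4UL		/* assumed no-execute bit */

/* hypothetical kernel text range, standing in for _stext.._etext */
static const unsigned long text_start = 0xc000000000000000UL;
static const unsigned long text_end   = 0xc000000000002000UL;	/* 2 pages */

static int in_kernel_text(unsigned long vaddr)
{
	return vaddr >= text_start && vaddr < text_end;
}

int main(void)
{
	unsigned long vstart = text_start;
	unsigned long vend = vstart + 4 * PAGE_SIZE;	/* straddles text_end */
	unsigned long prot = HPTE_R_N;	/* linear mapping defaults to NX */
	unsigned long vaddr;

	/* per-page decision, as in the fixed htab_bolt_mapping() */
	for (vaddr = vstart; vaddr < vend; vaddr += PAGE_SIZE) {
		unsigned long tprot = prot;

		if (in_kernel_text(vaddr))
			tprot &= ~HPTE_R_N;	/* only text pages executable */
		printf("page %#lx -> %s\n", vaddr,
		       (tprot & HPTE_R_N) ? "no-exec" : "exec");
	}
	return 0;
}

With the per-region variant this commit removes, the check would run once on
the region base and all four pages would come out executable; the per-page
loop marks only the first two.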
Diffstat (limited to 'arch/powerpc')
-rw-r--r--	arch/powerpc/mm/hash_utils_64.c	18
1 file changed, 11 insertions(+), 7 deletions(-)
diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c
index 14be408dfc9b..8920eea34528 100644
--- a/arch/powerpc/mm/hash_utils_64.c
+++ b/arch/powerpc/mm/hash_utils_64.c
@@ -191,12 +191,17 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend,
 		unsigned long hash, hpteg;
 		unsigned long vsid = get_kernel_vsid(vaddr, ssize);
 		unsigned long va = hpt_va(vaddr, vsid, ssize);
+		unsigned long tprot = prot;
+
+		/* Make kernel text executable */
+		if (in_kernel_text(vaddr))
+			tprot &= ~HPTE_R_N;
 
 		hash = hpt_hash(va, shift, ssize);
 		hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
 
 		BUG_ON(!ppc_md.hpte_insert);
-		ret = ppc_md.hpte_insert(hpteg, va, paddr, prot,
+		ret = ppc_md.hpte_insert(hpteg, va, paddr, tprot,
					 HPTE_V_BOLTED, psize, ssize);
 
 		if (ret < 0)
@@ -584,7 +589,7 @@ void __init htab_initialize(void)
 {
 	unsigned long table;
 	unsigned long pteg_count;
-	unsigned long prot, tprot;
+	unsigned long prot;
 	unsigned long base = 0, size = 0, limit;
 	int i;
 
@@ -660,10 +665,9 @@ void __init htab_initialize(void)
 	for (i=0; i < lmb.memory.cnt; i++) {
 		base = (unsigned long)__va(lmb.memory.region[i].base);
 		size = lmb.memory.region[i].size;
-		tprot = prot | (in_kernel_text(base) ? _PAGE_EXEC : 0);
 
 		DBG("creating mapping for region: %lx..%lx (prot: %x)\n",
-		    base, size, tprot);
+		    base, size, prot);
 
 #ifdef CONFIG_U3_DART
 		/* Do not map the DART space. Fortunately, it will be aligned
@@ -680,21 +684,21 @@ void __init htab_initialize(void)
 			unsigned long dart_table_end = dart_tablebase + 16 * MB;
 			if (base != dart_tablebase)
 				BUG_ON(htab_bolt_mapping(base, dart_tablebase,
-							__pa(base), tprot,
+							__pa(base), prot,
							mmu_linear_psize,
							mmu_kernel_ssize));
 			if ((base + size) > dart_table_end)
 				BUG_ON(htab_bolt_mapping(dart_tablebase+16*MB,
							base + size,
							__pa(dart_table_end),
-							tprot,
+							prot,
							mmu_linear_psize,
							mmu_kernel_ssize));
 			continue;
 		}
 #endif /* CONFIG_U3_DART */
 		BUG_ON(htab_bolt_mapping(base, base + size, __pa(base),
-				tprot, mmu_linear_psize, mmu_kernel_ssize));
+				prot, mmu_linear_psize, mmu_kernel_ssize));
 	}
 
 	/*