author	Russell King <rmk+kernel@armlinux.org.uk>	2016-11-21 16:02:08 +0000
committer	Russell King <rmk+kernel@armlinux.org.uk>	2016-11-29 18:00:34 +0000
commit	580218f9678e76f712a1cf6cff5a903917fa9558 (patch)
tree	c8c0e0f89a69097f23c10f8b97de5918f0859efa /arch/arm/mm
parent	a85b2257a57d5b3328fdecec9255c6d30c67c8bb (diff)
ARM: mm: fix set_memory_*() bounds checks
The set_memory_*() bounds checks are buggy on several fronts:

1. They fail to round the region size up if the passed address is not
   page aligned.
2. The region check was incomplete, and didn't correspond with what was
   being asked of apply_to_page_range().

So, rework change_memory_common() to fix these problems, adding an
in_range() helper to determine whether the start & size fit within the
provided region start and stop addresses.

Signed-off-by: Russell King <rmk+kernel@armlinux.org.uk>
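To make the two problems concrete, here is a minimal stand-alone user-space sketch (not kernel code): the PAGE_* macros and the MODULES_VADDR/MODULES_END values below are simplified stand-ins chosen only for illustration, and in_range() mirrors the helper added by this patch. It shows the old arithmetic covering one page too few when the passed address is not page aligned, and the old pair of checks accepting a region whose end runs past MODULES_END, which the size-based check rejects.

#include <stdio.h>
#include <stdbool.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)
#define PAGE_MASK	(~(PAGE_SIZE - 1))
#define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & PAGE_MASK)

/* stand-in module area bounds, for the example only */
#define MODULES_VADDR	0x7f000000UL
#define MODULES_END	0x7fe00000UL

/* same shape as the helper added by this patch */
static bool in_range(unsigned long start, unsigned long size,
		     unsigned long range_start, unsigned long range_end)
{
	return start >= range_start && start < range_end &&
	       size <= range_end - start;
}

int main(void)
{
	unsigned long addr = MODULES_VADDR + 0x10;	/* not page aligned */
	int numpages = 1;

	/* old code: size never grows, so the tail page is missed */
	unsigned long old_start = addr & PAGE_MASK;
	unsigned long old_end = old_start + PAGE_SIZE * numpages;

	/* new code: end is rounded up from the unaligned address */
	unsigned long start = addr & PAGE_MASK;
	unsigned long end = PAGE_ALIGN(addr) + numpages * PAGE_SIZE;
	unsigned long size = end - start;

	printf("old: %#lx-%#lx (%lu page(s))\n", old_start, old_end,
	       (old_end - old_start) / PAGE_SIZE);		/* 1 page  */
	printf("new: %#lx-%#lx (%lu page(s))\n", start, end,
	       size / PAGE_SIZE);				/* 2 pages */

	/*
	 * Old, incomplete check: an end past MODULES_END was never caught,
	 * because the second test compared start (not end) to MODULES_END.
	 */
	unsigned long big = MODULES_END - start + PAGE_SIZE;
	bool old_ok = !(start < MODULES_VADDR || start >= MODULES_END) &&
		      !(start + big < MODULES_VADDR || start >= MODULES_END);
	printf("old checks accept oversized region: %d\n", old_ok);	/* 1 */
	printf("in_range() accepts it:              %d\n",
	       in_range(start, big, MODULES_VADDR, MODULES_END));	/* 0 */

	return 0;
}

Expressing the check as size against range_end - start, rather than comparing a separately computed end, also means the test needs no special reasoning about the case where start + size would wrap around.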
Diffstat (limited to 'arch/arm/mm')
-rw-r--r--	arch/arm/mm/pageattr.c	26
1 file changed, 13 insertions(+), 13 deletions(-)
diff --git a/arch/arm/mm/pageattr.c b/arch/arm/mm/pageattr.c
index d19b1ad29b07..6cb0d8ea9138 100644
--- a/arch/arm/mm/pageattr.c
+++ b/arch/arm/mm/pageattr.c
@@ -34,28 +34,28 @@ static int change_page_range(pte_t *ptep, pgtable_t token, unsigned long addr,
 	return 0;
 }
 
+static bool in_range(unsigned long start, unsigned long size,
+	unsigned long range_start, unsigned long range_end)
+{
+	return start >= range_start && start < range_end &&
+		size <= range_end - start;
+}
+
 static int change_memory_common(unsigned long addr, int numpages,
 				pgprot_t set_mask, pgprot_t clear_mask)
 {
-	unsigned long start = addr;
-	unsigned long size = PAGE_SIZE*numpages;
-	unsigned long end = start + size;
+	unsigned long start = addr & PAGE_MASK;
+	unsigned long end = PAGE_ALIGN(addr) + numpages * PAGE_SIZE;
+	unsigned long size = end - start;
 	int ret;
 	struct page_change_data data;
 
-	if (!IS_ALIGNED(addr, PAGE_SIZE)) {
-		start &= PAGE_MASK;
-		end = start + size;
-		WARN_ON_ONCE(1);
-	}
+	WARN_ON_ONCE(start != addr);
 
-	if (!numpages)
+	if (!size)
 		return 0;
 
-	if (start < MODULES_VADDR || start >= MODULES_END)
-		return -EINVAL;
-
-	if (end < MODULES_VADDR || start >= MODULES_END)
+	if (!in_range(start, size, MODULES_VADDR, MODULES_END))
 		return -EINVAL;
 
 	data.set_mask = set_mask;