Diffstat (limited to 'mm/mmap.c')
 mm/mmap.c | 27 ++++++++++++++-------------
 1 file changed, 14 insertions(+), 13 deletions(-)
diff --git a/mm/mmap.c b/mm/mmap.c
index a5e3dcd75e79..f19efcf75418 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -94,7 +94,7 @@ static void unmap_region(struct mm_struct *mm,
  * w: (no) no
  * x: (yes) yes
  */
-pgprot_t protection_map[16] = {
+pgprot_t protection_map[16] __ro_after_init = {
 	__P000, __P001, __P010, __P011, __P100, __P101, __P110, __P111,
 	__S000, __S001, __S010, __S011, __S100, __S101, __S110, __S111
 };
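
For readers unfamiliar with the attribute: __ro_after_init (from <linux/cache.h>) places a variable in a section that stays writable while init code runs and is remapped read-only once boot finishes, so protection_map[] can still be patched by early setup code but not by anything later. A minimal sketch of the pattern, with invented identifiers (example_table, example_init):

	#include <linux/cache.h>
	#include <linux/init.h>

	static unsigned long example_table[4] __ro_after_init = { 1, 2, 4, 8 };

	static int __init example_init(void)
	{
		/* Still writable here: the read-only protection is only
		 * applied after init completes, by mark_rodata_ro(). */
		example_table[0] = 42;
		return 0;
	}
	core_initcall(example_init);

	/* A write to example_table after boot would fault. */
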
@@ -2177,7 +2177,6 @@ static int acct_stack_growth(struct vm_area_struct *vma,
 			     unsigned long size, unsigned long grow)
 {
 	struct mm_struct *mm = vma->vm_mm;
-	struct rlimit *rlim = current->signal->rlim;
 	unsigned long new_start;
 
 	/* address space limit tests */
@@ -2185,7 +2184,7 @@ static int acct_stack_growth(struct vm_area_struct *vma,
 		return -ENOMEM;
 
 	/* Stack limit test */
-	if (size > READ_ONCE(rlim[RLIMIT_STACK].rlim_cur))
+	if (size > rlimit(RLIMIT_STACK))
 		return -ENOMEM;
 
 	/* mlock limit tests */
@@ -2193,7 +2192,7 @@ static int acct_stack_growth(struct vm_area_struct *vma,
 		unsigned long locked;
 		unsigned long limit;
 		locked = mm->locked_vm + grow;
-		limit = READ_ONCE(rlim[RLIMIT_MEMLOCK].rlim_cur);
+		limit = rlimit(RLIMIT_MEMLOCK);
 		limit >>= PAGE_SHIFT;
 		if (locked > limit && !capable(CAP_IPC_LOCK))
 			return -ENOMEM;
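
The three hunks above are one logical change: open-coded reads of current->signal->rlim[] are replaced by the rlimit() accessor. As a sketch of what that accessor does (its definition, roughly as found in the scheduler headers; the exact header has moved between releases):

	static inline unsigned long task_rlimit(const struct task_struct *task,
						unsigned int limit)
	{
		return READ_ONCE(task->signal->rlim[limit].rlim_cur);
	}

	static inline unsigned long rlimit(unsigned int limit)
	{
		return task_rlimit(current, limit);
	}

So the change is behavior-preserving: the READ_ONCE() the old code spelled out by hand now happens inside the helper.
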
@@ -2232,7 +2231,7 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 
 	/* Guard against exceeding limits of the address space. */
 	address &= PAGE_MASK;
-	if (address >= TASK_SIZE)
+	if (address >= (TASK_SIZE & PAGE_MASK))
 		return -ENOMEM;
 	address += PAGE_SIZE;
 
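
The masking matters because address has just been page-aligned while TASK_SIZE need not be a multiple of PAGE_SIZE on every architecture; with an unaligned TASK_SIZE the old comparison could admit a final, partial page. A worked example with invented numbers:

	/* Illustration only; the values are made up. */
	#define EX_PAGE_MASK	(~0xfffUL)		/* 4 KiB pages */

	unsigned long task_size = 0xfffff800UL;		/* hypothetically unaligned */
	unsigned long addr	= 0xfffff000UL;		/* already page-aligned */

	/* Old test: addr >= task_size is false (0xfffff000 < 0xfffff800),
	 * so growth into the partial last page would be allowed.
	 * New test: addr >= (task_size & EX_PAGE_MASK), i.e.
	 * 0xfffff000 >= 0xfffff000, is true, so the growth is refused. */
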
@@ -2244,7 +2243,8 @@ int expand_upwards(struct vm_area_struct *vma, unsigned long address)
 		gap_addr = TASK_SIZE;
 
 	next = vma->vm_next;
-	if (next && next->vm_start < gap_addr) {
+	if (next && next->vm_start < gap_addr &&
+			(next->vm_flags & (VM_WRITE|VM_READ|VM_EXEC))) {
 		if (!(next->vm_flags & VM_GROWSUP))
 			return -ENOMEM;
 		/* Check that both stack segments have the same anon_vma? */
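
The extra flag test narrows when the guard gap is enforced: a neighbouring mapping with none of VM_WRITE, VM_READ, or VM_EXEC, i.e. a PROT_NONE reservation, no longer blocks stack growth, the idea being that an inaccessible mapping already serves as its own guard. The predicate could be written as a helper like this (vma_blocks_gap is an invented name; the patch open-codes the test):

	static bool vma_blocks_gap(struct vm_area_struct *next,
				   unsigned long gap_addr)
	{
		/* A PROT_NONE neighbour is itself an inaccessible barrier,
		 * so it is exempt from the stack_guard_gap distance check. */
		return next && next->vm_start < gap_addr &&
		       (next->vm_flags & (VM_WRITE | VM_READ | VM_EXEC));
	}
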
@@ -2315,7 +2315,6 @@ int expand_downwards(struct vm_area_struct *vma,
 {
 	struct mm_struct *mm = vma->vm_mm;
 	struct vm_area_struct *prev;
-	unsigned long gap_addr;
 	int error;
 
 	address &= PAGE_MASK;
@@ -2324,14 +2323,12 @@ int expand_downwards(struct vm_area_struct *vma,
 		return error;
 
 	/* Enforce stack_guard_gap */
-	gap_addr = address - stack_guard_gap;
-	if (gap_addr > address)
-		return -ENOMEM;
 	prev = vma->vm_prev;
-	if (prev && prev->vm_end > gap_addr) {
-		if (!(prev->vm_flags & VM_GROWSDOWN))
+	/* Check that both stack segments have the same anon_vma? */
+	if (prev && !(prev->vm_flags & VM_GROWSDOWN) &&
+			(prev->vm_flags & (VM_WRITE|VM_READ|VM_EXEC))) {
+		if (address - prev->vm_end < stack_guard_gap)
 			return -ENOMEM;
-		/* Check that both stack segments have the same anon_vma? */
 	}
 
 	/* We must make sure the anon_vma is allocated. */
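
Note that the rewrite also drops the explicit wrap-around test: the old code computed address - stack_guard_gap, which underflows when address < stack_guard_gap, hence the old "if (gap_addr > address)" guard. The new form subtracts prev->vm_end from address, and since the previous VMA always ends at or below the address being grown to, that difference cannot underflow. With invented numbers:

	/* Illustration only; values are made up. stack_guard_gap defaults
	 * to 256UL << PAGE_SHIFT, i.e. 1 MiB with 4 KiB pages. */
	unsigned long stack_gap = 0x100000UL;	/* 256 pages of 4 KiB */
	unsigned long addr	= 0x00010000UL;	/* proposed new stack base */
	unsigned long prev_end	= 0x0000f000UL;	/* end of the previous vma */

	/* Old form: addr - stack_gap wraps around (0x10000 < 0x100000),
	 * which is why the extra overflow check existed.
	 * New form: addr - prev_end == 0x1000, always well-defined;
	 * 0x1000 < stack_gap, so the expansion is correctly refused. */
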
@@ -3186,8 +3183,12 @@ static int special_mapping_mremap(struct vm_area_struct *new_vma)
 {
 	struct vm_special_mapping *sm = new_vma->vm_private_data;
 
+	if (WARN_ON_ONCE(current->mm != new_vma->vm_mm))
+		return -EFAULT;
+
 	if (sm->mremap)
 		return sm->mremap(sm, new_vma);
+
 	return 0;
 }
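
The added check follows a common defensive pattern: assert an invariant callers should already uphold (here, that the mremap runs against the caller's own mm), warn once with a backtrace if it ever breaks, and fail the operation rather than continue. A generic sketch, with invented names (frob_ctx, do_frob):

	#include <linux/bug.h>
	#include <linux/errno.h>
	#include <linux/sched.h>

	struct frob_ctx {
		struct task_struct *owner;
	};

	static int do_frob(struct frob_ctx *ctx)
	{
		/* WARN_ON_ONCE() evaluates to its condition and prints a
		 * backtrace only the first time it fires, so it can gate
		 * an early bail-out without flooding the log. */
		if (WARN_ON_ONCE(ctx->owner != current))
			return -EINVAL;

		return 0;
	}
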