Diffstat (limited to 'mm')
-rw-r--r--  mm/fadvise.c    |  18
-rw-r--r--  mm/filemap.c    |   9
-rw-r--r--  mm/fremap.c     |   4
-rw-r--r--  mm/madvise.c    |   2
-rw-r--r--  mm/memcontrol.c | 130
-rw-r--r--  mm/memory.c     |  24
-rw-r--r--  mm/mempolicy.c  |  24
-rw-r--r--  mm/migrate.c    |   8
-rw-r--r--  mm/mincore.c    |   4
-rw-r--r--  mm/mlock.c      |   8
-rw-r--r--  mm/mmap.c       |   4
-rw-r--r--  mm/mprotect.c   |   4
-rw-r--r--  mm/mremap.c     |   6
-rw-r--r--  mm/msync.c      |   2
-rw-r--r--  mm/nommu.c      |  11
-rw-r--r--  mm/swapfile.c   |   4
-rw-r--r--  mm/vmalloc.c    |  20
17 files changed, 181 insertions(+), 101 deletions(-)
diff --git a/mm/fadvise.c b/mm/fadvise.c
index a1da969bd980..54a0f8040afa 100644
--- a/mm/fadvise.c
+++ b/mm/fadvise.c
@@ -24,7 +24,7 @@
* POSIX_FADV_WILLNEED could set PG_Referenced, and POSIX_FADV_NOREUSE could
* deactivate the pages and clear PG_Referenced.
*/
-asmlinkage long sys_fadvise64_64(int fd, loff_t offset, loff_t len, int advice)
+SYSCALL_DEFINE(fadvise64_64)(int fd, loff_t offset, loff_t len, int advice)
{
struct file *file = fget(fd);
struct address_space *mapping;
@@ -126,12 +126,26 @@ out:
fput(file);
return ret;
}
+#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
+asmlinkage long SyS_fadvise64_64(long fd, loff_t offset, loff_t len, long advice)
+{
+ return SYSC_fadvise64_64((int) fd, offset, len, (int) advice);
+}
+SYSCALL_ALIAS(sys_fadvise64_64, SyS_fadvise64_64);
+#endif
#ifdef __ARCH_WANT_SYS_FADVISE64
-asmlinkage long sys_fadvise64(int fd, loff_t offset, size_t len, int advice)
+SYSCALL_DEFINE(fadvise64)(int fd, loff_t offset, size_t len, int advice)
{
return sys_fadvise64_64(fd, offset, len, advice);
}
+#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
+asmlinkage long SyS_fadvise64(long fd, loff_t offset, long len, long advice)
+{
+ return SYSC_fadvise64((int) fd, offset, (size_t)len, (int)advice);
+}
+SYSCALL_ALIAS(sys_fadvise64, SyS_fadvise64);
+#endif
#endif
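
[Note: the SyS_/SYSCALL_ALIAS pattern above is what CONFIG_HAVE_SYSCALL_WRAPPERS architectures (64-bit ABIs such as s390 that do not guarantee clean upper register halves) need: the wrapper accepts every scalar argument as a long and explicitly narrows it, so the compiler re-extends the low 32 bits before the real body runs. A minimal userspace sketch of the idea — the names below are illustrative stand-ins, not the kernel macros:

#include <stdio.h>

/* Stand-in for the SYSC_fadvise64_64 body that SYSCALL_DEFINE generates. */
static long sysc_fadvise64_64(int fd, long long offset, long long len,
                              int advice)
{
        return fd;      /* just echo the narrowed descriptor */
}

/* Wrapper shape: scalars arrive as long and are explicitly cast. */
long sys_fadvise64_64_demo(long fd, long long offset, long long len,
                           long advice)
{
        return sysc_fadvise64_64((int)fd, offset, len, (int)advice);
}

int main(void)
{
        /* Garbage in the upper 32 bits must not change the fd seen. */
        printf("%ld\n", sys_fadvise64_64_demo(0x100000003L, 0, 0, 0));
        return 0;       /* prints 3 */
}

]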
diff --git a/mm/filemap.c b/mm/filemap.c
index ceba0bd03662..23acefe51808 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1374,7 +1374,7 @@ do_readahead(struct address_space *mapping, struct file *filp,
return 0;
}
-asmlinkage ssize_t sys_readahead(int fd, loff_t offset, size_t count)
+SYSCALL_DEFINE(readahead)(int fd, loff_t offset, size_t count)
{
ssize_t ret;
struct file *file;
@@ -1393,6 +1393,13 @@ asmlinkage ssize_t sys_readahead(int fd, loff_t offset, size_t count)
}
return ret;
}
+#ifdef CONFIG_HAVE_SYSCALL_WRAPPERS
+asmlinkage long SyS_readahead(long fd, loff_t offset, long count)
+{
+ return SYSC_readahead((int) fd, offset, (size_t) count);
+}
+SYSCALL_ALIAS(sys_readahead, SyS_readahead);
+#endif
#ifdef CONFIG_MMU
/**
diff --git a/mm/fremap.c b/mm/fremap.c
index 62d5bbda921a..736ba7f3306a 100644
--- a/mm/fremap.c
+++ b/mm/fremap.c
@@ -120,8 +120,8 @@ static int populate_range(struct mm_struct *mm, struct vm_area_struct *vma,
* and the vma's default protection is used. Arbitrary protections
* might be implemented in the future.
*/
-asmlinkage long sys_remap_file_pages(unsigned long start, unsigned long size,
- unsigned long prot, unsigned long pgoff, unsigned long flags)
+SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
+ unsigned long, prot, unsigned long, pgoff, unsigned long, flags)
{
struct mm_struct *mm = current->mm;
struct address_space *mapping;
diff --git a/mm/madvise.c b/mm/madvise.c
index f9349c18a1b5..b9ce574827c8 100644
--- a/mm/madvise.c
+++ b/mm/madvise.c
@@ -281,7 +281,7 @@ madvise_vma(struct vm_area_struct *vma, struct vm_area_struct **prev,
* -EBADF - map exists, but area maps something that isn't a file.
* -EAGAIN - a kernel resource was temporarily unavailable.
*/
-asmlinkage long sys_madvise(unsigned long start, size_t len_in, int behavior)
+SYSCALL_DEFINE3(madvise, unsigned long, start, size_t, len_in, int, behavior)
{
unsigned long end, tmp;
struct vm_area_struct * vma, *prev;
diff --git a/mm/memcontrol.c b/mm/memcontrol.c
index e2996b80601f..4d0ea3ceba6d 100644
--- a/mm/memcontrol.c
+++ b/mm/memcontrol.c
@@ -358,6 +358,10 @@ void mem_cgroup_rotate_lru_list(struct page *page, enum lru_list lru)
return;
pc = lookup_page_cgroup(page);
+ /*
+ * Used bit is set without atomic ops but after smp_wmb().
+ * For making pc->mem_cgroup visible, insert smp_rmb() here.
+ */
smp_rmb();
/* unused page is not rotated. */
if (!PageCgroupUsed(pc))
@@ -374,7 +378,10 @@ void mem_cgroup_add_lru_list(struct page *page, enum lru_list lru)
if (mem_cgroup_disabled())
return;
pc = lookup_page_cgroup(page);
- /* barrier to sync with "charge" */
+ /*
+ * Used bit is set without atomic ops but after smp_wmb().
+ * For making pc->mem_cgroup visible, insert smp_rmb() here.
+ */
smp_rmb();
if (!PageCgroupUsed(pc))
return;
@@ -559,6 +566,14 @@ mem_cgroup_get_reclaim_stat_from_page(struct page *page)
return NULL;
pc = lookup_page_cgroup(page);
+ /*
+ * Used bit is set without atomic ops but after smp_wmb().
+ * For making pc->mem_cgroup visible, insert smp_rmb() here.
+ */
+ smp_rmb();
+ if (!PageCgroupUsed(pc))
+ return NULL;
+
mz = page_cgroup_zoneinfo(pc);
if (!mz)
return NULL;
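
[Note: the smp_rmb() comments added in the hunks above pair with an smp_wmb() on the charge side: pc->mem_cgroup is stored first, the write barrier is issued, and only then does the Used bit become visible, so a reader that checks the bit after smp_rmb() is guaranteed to see the right mem_cgroup. A self-contained sketch of that publish/observe discipline, using C11 fences in place of the kernel barriers (struct and function names are illustrative, not the memcg API):

#include <stdatomic.h>
#include <stdio.h>

struct pc_demo {
        void *mem_cgroup;       /* payload published by the writer */
        atomic_int used;        /* stands in for PCG_USED          */
};

static void charge(struct pc_demo *pc, void *memcg)
{
        pc->mem_cgroup = memcg;                         /* fill payload */
        atomic_thread_fence(memory_order_release);      /* smp_wmb()    */
        atomic_store_explicit(&pc->used, 1, memory_order_relaxed);
}

static void *lookup(struct pc_demo *pc)
{
        if (!atomic_load_explicit(&pc->used, memory_order_relaxed))
                return NULL;                            /* unused page  */
        atomic_thread_fence(memory_order_acquire);      /* smp_rmb()    */
        return pc->mem_cgroup;                          /* safe to read */
}

int main(void)
{
        struct pc_demo pc = { 0, 0 };
        charge(&pc, &pc);
        printf("%p\n", lookup(&pc));
        return 0;
}

]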
@@ -618,7 +633,7 @@ unsigned long mem_cgroup_isolate_pages(unsigned long nr_to_scan,
* called with hierarchy_mutex held
*/
static struct mem_cgroup *
-mem_cgroup_get_next_node(struct mem_cgroup *curr, struct mem_cgroup *root_mem)
+__mem_cgroup_get_next_node(struct mem_cgroup *curr, struct mem_cgroup *root_mem)
{
struct cgroup *cgroup, *curr_cgroup, *root_cgroup;
@@ -629,19 +644,16 @@ mem_cgroup_get_next_node(struct mem_cgroup *curr, struct mem_cgroup *root_mem)
/*
* Walk down to children
*/
- mem_cgroup_put(curr);
cgroup = list_entry(curr_cgroup->children.next,
struct cgroup, sibling);
curr = mem_cgroup_from_cont(cgroup);
- mem_cgroup_get(curr);
goto done;
}
visit_parent:
if (curr_cgroup == root_cgroup) {
- mem_cgroup_put(curr);
- curr = root_mem;
- mem_cgroup_get(curr);
+ /* caller handles NULL case */
+ curr = NULL;
goto done;
}
@@ -649,11 +661,9 @@ visit_parent:
* Goto next sibling
*/
if (curr_cgroup->sibling.next != &curr_cgroup->parent->children) {
- mem_cgroup_put(curr);
cgroup = list_entry(curr_cgroup->sibling.next, struct cgroup,
sibling);
curr = mem_cgroup_from_cont(cgroup);
- mem_cgroup_get(curr);
goto done;
}
@@ -664,7 +674,6 @@ visit_parent:
goto visit_parent;
done:
- root_mem->last_scanned_child = curr;
return curr;
}
@@ -674,40 +683,46 @@ done:
* that to reclaim free pages from.
*/
static struct mem_cgroup *
-mem_cgroup_get_first_node(struct mem_cgroup *root_mem)
+mem_cgroup_get_next_node(struct mem_cgroup *root_mem)
{
struct cgroup *cgroup;
- struct mem_cgroup *ret;
+ struct mem_cgroup *orig, *next;
bool obsolete;
- obsolete = mem_cgroup_is_obsolete(root_mem->last_scanned_child);
-
/*
* Scan all children under the mem_cgroup mem
*/
mutex_lock(&mem_cgroup_subsys.hierarchy_mutex);
+
+ orig = root_mem->last_scanned_child;
+ obsolete = mem_cgroup_is_obsolete(orig);
+
if (list_empty(&root_mem->css.cgroup->children)) {
- ret = root_mem;
+ /*
+ * root_mem might have children before and last_scanned_child
+ * may point to one of them. We put it later.
+ */
+ if (orig)
+ VM_BUG_ON(!obsolete);
+ next = NULL;
goto done;
}
- if (!root_mem->last_scanned_child || obsolete) {
-
- if (obsolete && root_mem->last_scanned_child)
- mem_cgroup_put(root_mem->last_scanned_child);
-
+ if (!orig || obsolete) {
cgroup = list_first_entry(&root_mem->css.cgroup->children,
struct cgroup, sibling);
- ret = mem_cgroup_from_cont(cgroup);
- mem_cgroup_get(ret);
+ next = mem_cgroup_from_cont(cgroup);
} else
- ret = mem_cgroup_get_next_node(root_mem->last_scanned_child,
- root_mem);
+ next = __mem_cgroup_get_next_node(orig, root_mem);
done:
- root_mem->last_scanned_child = ret;
+ if (next)
+ mem_cgroup_get(next);
+ root_mem->last_scanned_child = next;
+ if (orig)
+ mem_cgroup_put(orig);
mutex_unlock(&mem_cgroup_subsys.hierarchy_mutex);
- return ret;
+ return (next) ? next : root_mem;
}
static bool mem_cgroup_check_under_limit(struct mem_cgroup *mem)
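
[Note: the rewritten mem_cgroup_get_next_node() above concentrates the reference counting in one place: pin the new last_scanned_child with mem_cgroup_get() before publishing it, and only then drop the pin on the previous cursor, all under hierarchy_mutex. A toy sketch of that get-before-put cursor discipline, with a fake refcount instead of the css machinery (illustrative only):

#include <pthread.h>
#include <stdio.h>

struct node { int refcnt; };

static void get(struct node *n) { if (n) n->refcnt++; }
static void put(struct node *n) { if (n) n->refcnt--; }

static pthread_mutex_t hierarchy_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *last_scanned;       /* the persistent cursor */

static struct node *advance(struct node *next)
{
        struct node *orig;

        pthread_mutex_lock(&hierarchy_lock);
        orig = last_scanned;
        get(next);              /* pin the new cursor first      */
        last_scanned = next;    /* publish it                    */
        put(orig);              /* only now release the old pin  */
        pthread_mutex_unlock(&hierarchy_lock);
        return next;
}

int main(void)
{
        struct node a = { 0 }, b = { 0 };
        advance(&a);
        advance(&b);
        printf("a=%d b=%d\n", a.refcnt, b.refcnt);      /* a=0 b=1 */
        return 0;
}

]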
@@ -758,28 +773,25 @@ static int mem_cgroup_hierarchical_reclaim(struct mem_cgroup *root_mem,
* but there might be left over accounting, even after children
* have left.
*/
- ret = try_to_free_mem_cgroup_pages(root_mem, gfp_mask, noswap,
+ ret += try_to_free_mem_cgroup_pages(root_mem, gfp_mask, noswap,
get_swappiness(root_mem));
if (mem_cgroup_check_under_limit(root_mem))
- return 0;
+ return 1; /* indicate reclaim has succeeded */
if (!root_mem->use_hierarchy)
return ret;
- next_mem = mem_cgroup_get_first_node(root_mem);
+ next_mem = mem_cgroup_get_next_node(root_mem);
while (next_mem != root_mem) {
if (mem_cgroup_is_obsolete(next_mem)) {
- mem_cgroup_put(next_mem);
- next_mem = mem_cgroup_get_first_node(root_mem);
+ next_mem = mem_cgroup_get_next_node(root_mem);
continue;
}
- ret = try_to_free_mem_cgroup_pages(next_mem, gfp_mask, noswap,
+ ret += try_to_free_mem_cgroup_pages(next_mem, gfp_mask, noswap,
get_swappiness(next_mem));
if (mem_cgroup_check_under_limit(root_mem))
- return 0;
- mutex_lock(&mem_cgroup_subsys.hierarchy_mutex);
- next_mem = mem_cgroup_get_next_node(next_mem, root_mem);
- mutex_unlock(&mem_cgroup_subsys.hierarchy_mutex);
+ return 1; /* indicate reclaim has succeeded */
+ next_mem = mem_cgroup_get_next_node(root_mem);
}
return ret;
}
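
[Note: the changed return convention above — keep accumulating ret, but return 1 as soon as usage drops back under the limit — lets the caller in __mem_cgroup_try_charge treat "reclaim succeeded" as a signal to retry the charge immediately (the "if (ret) continue;" added further down). A compact sketch of that retry protocol, with stand-in names and numbers:

#include <stdio.h>

static int usage = 5, limit = 3;

/* Returns 1 once reclaim has brought usage back under the limit. */
static int reclaim(void)
{
        if (usage > 0)
                usage--;
        return usage < limit;
}

static int try_charge(void)
{
        int retries = 5;

        while (usage + 1 > limit) {
                if (reclaim())
                        continue;       /* success: retry the charge */
                if (!--retries)
                        return -1;      /* give up: -ENOMEM          */
        }
        usage++;
        return 0;
}

int main(void)
{
        printf("charge: %d, usage now %d\n", try_charge(), usage);
        return 0;
}

]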
@@ -863,6 +875,8 @@ static int __mem_cgroup_try_charge(struct mm_struct *mm,
ret = mem_cgroup_hierarchical_reclaim(mem_over_limit, gfp_mask,
noswap);
+ if (ret)
+ continue;
/*
* try_to_free_mem_cgroup_pages() might not give us a full
@@ -979,14 +993,15 @@ static int mem_cgroup_move_account(struct page_cgroup *pc,
if (pc->mem_cgroup != from)
goto out;
- css_put(&from->css);
res_counter_uncharge(&from->res, PAGE_SIZE);
mem_cgroup_charge_statistics(from, pc, false);
if (do_swap_account)
res_counter_uncharge(&from->memsw, PAGE_SIZE);
+ css_put(&from->css);
+
+ css_get(&to->css);
pc->mem_cgroup = to;
mem_cgroup_charge_statistics(to, pc, true);
- css_get(&to->css);
ret = 0;
out:
unlock_page_cgroup(pc);
@@ -1019,8 +1034,10 @@ static int mem_cgroup_move_parent(struct page_cgroup *pc,
if (ret || !parent)
return ret;
- if (!get_page_unless_zero(page))
- return -EBUSY;
+ if (!get_page_unless_zero(page)) {
+ ret = -EBUSY;
+ goto uncharge;
+ }
ret = isolate_lru_page(page);
@@ -1029,19 +1046,23 @@ static int mem_cgroup_move_parent(struct page_cgroup *pc,
ret = mem_cgroup_move_account(pc, child, parent);
- /* drop extra refcnt by try_charge() (move_account increment one) */
- css_put(&parent->css);
putback_lru_page(page);
if (!ret) {
put_page(page);
+ /* drop extra refcnt by try_charge() */
+ css_put(&parent->css);
return 0;
}
- /* uncharge if move fails */
+
cancel:
+ put_page(page);
+uncharge:
+ /* drop extra refcnt by try_charge() */
+ css_put(&parent->css);
+ /* uncharge if move fails */
res_counter_uncharge(&parent->res, PAGE_SIZE);
if (do_swap_account)
res_counter_uncharge(&parent->memsw, PAGE_SIZE);
- put_page(page);
return ret;
}
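
[Note: the reworked exit paths in mem_cgroup_move_parent() above form a classic C unwind ladder: each failure point jumps to a label that releases exactly the resources acquired so far, and the extra css reference taken by try_charge is now dropped on every path. The shape, sketched with stand-in acquire/release calls (illustrative only):

#include <stdio.h>

static void get(const char *what) { printf("get %s\n", what); }
static void put(const char *what) { printf("put %s\n", what); }

static int move_demo(int fail_at)
{
        get("charge");                  /* like try_charge()          */
        get("parent css");
        if (fail_at == 1)
                goto uncharge;          /* get_page_unless_zero fails */

        get("page");
        if (fail_at == 2)
                goto cancel;            /* move_account fails         */

        put("page");                    /* success: keep the charge   */
        put("parent css");
        return 0;

cancel:
        put("page");
uncharge:
        put("parent css");
        put("charge");                  /* uncharge if move fails     */
        return -1;
}

int main(void)
{
        move_demo(0);
        move_demo(1);
        return 0;
}

]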
@@ -1971,6 +1992,7 @@ static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft,
{
struct mem_cgroup *memcg = mem_cgroup_from_cont(cgrp);
struct mem_cgroup *parent;
+
if (val > 100)
return -EINVAL;
@@ -1978,15 +2000,22 @@ static int mem_cgroup_swappiness_write(struct cgroup *cgrp, struct cftype *cft,
return -EINVAL;
parent = mem_cgroup_from_cont(cgrp->parent);
+
+ cgroup_lock();
+
/* If under hierarchy, only empty-root can set this value */
if ((parent->use_hierarchy) ||
- (memcg->use_hierarchy && !list_empty(&cgrp->children)))
+ (memcg->use_hierarchy && !list_empty(&cgrp->children))) {
+ cgroup_unlock();
return -EINVAL;
+ }
spin_lock(&memcg->reclaim_param_lock);
memcg->swappiness = val;
spin_unlock(&memcg->reclaim_param_lock);
+ cgroup_unlock();
+
return 0;
}
@@ -2181,7 +2210,7 @@ static void __init enable_swap_cgroup(void)
}
#endif
-static struct cgroup_subsys_state *
+static struct cgroup_subsys_state * __ref
mem_cgroup_create(struct cgroup_subsys *ss, struct cgroup *cont)
{
struct mem_cgroup *mem, *parent;
@@ -2232,7 +2261,14 @@ static void mem_cgroup_pre_destroy(struct cgroup_subsys *ss,
static void mem_cgroup_destroy(struct cgroup_subsys *ss,
struct cgroup *cont)
{
- mem_cgroup_put(mem_cgroup_from_cont(cont));
+ struct mem_cgroup *mem = mem_cgroup_from_cont(cont);
+ struct mem_cgroup *last_scanned_child = mem->last_scanned_child;
+
+ if (last_scanned_child) {
+ VM_BUG_ON(!mem_cgroup_is_obsolete(last_scanned_child));
+ mem_cgroup_put(last_scanned_child);
+ }
+ mem_cgroup_put(mem);
}
static int mem_cgroup_populate(struct cgroup_subsys *ss,
diff --git a/mm/memory.c b/mm/memory.c
index e009ce870859..22bfa7a47a0b 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -1511,6 +1511,7 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn)
{
int ret;
+ pgprot_t pgprot = vma->vm_page_prot;
/*
* Technically, architectures with pte_special can avoid all these
* restrictions (same for remap_pfn_range). However we would like
@@ -1525,10 +1526,10 @@ int vm_insert_pfn(struct vm_area_struct *vma, unsigned long addr,
if (addr < vma->vm_start || addr >= vma->vm_end)
return -EFAULT;
- if (track_pfn_vma_new(vma, vma->vm_page_prot, pfn, PAGE_SIZE))
+ if (track_pfn_vma_new(vma, &pgprot, pfn, PAGE_SIZE))
return -EINVAL;
- ret = insert_pfn(vma, addr, pfn, vma->vm_page_prot);
+ ret = insert_pfn(vma, addr, pfn, pgprot);
if (ret)
untrack_pfn_vma(vma, pfn, PAGE_SIZE);
@@ -1671,9 +1672,15 @@ int remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP;
- err = track_pfn_vma_new(vma, prot, pfn, PAGE_ALIGN(size));
- if (err)
+ err = track_pfn_vma_new(vma, &prot, pfn, PAGE_ALIGN(size));
+ if (err) {
+ /*
+ * To indicate that track_pfn related cleanup is not
+ * needed from higher level routine calling unmap_vmas
+ */
+ vma->vm_flags &= ~(VM_IO | VM_RESERVED | VM_PFNMAP);
return -EINVAL;
+ }
BUG_ON(addr >= end);
pfn -= addr >> PAGE_SHIFT;
@@ -3165,6 +3172,15 @@ void print_vma_addr(char *prefix, unsigned long ip)
#ifdef CONFIG_PROVE_LOCKING
void might_fault(void)
{
+ /*
+ * Some code (nfs/sunrpc) uses socket ops on kernel memory while
+ * holding the mmap_sem, this is safe because kernel memory doesn't
+ * get paged out, therefore we'll never actually fault, and the
+ * below annotations will generate false positives.
+ */
+ if (segment_eq(get_fs(), KERNEL_DS))
+ return;
+
might_sleep();
/*
* it would be nicer only to annotate paths which are not under
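
[Note: the get_fs()/KERNEL_DS test added to might_fault() above short-circuits the debug annotation whenever the thread's address limit has been raised to kernel space, because such accesses cannot page-fault and would only trip false lockdep positives. The guard shape, sketched with a thread-local stand-in for the kernel's address limit (names illustrative, not the uaccess API):

#include <stdio.h>

enum addr_limit { USER_DS, KERNEL_DS };

/* Per-thread limit, as set_fs()/get_fs() maintain it in the kernel. */
static _Thread_local enum addr_limit fs = USER_DS;

static void might_fault_demo(void)
{
        if (fs == KERNEL_DS)
                return;         /* kernel memory can't fault: skip */
        /* here the kernel would run might_sleep() and the
         * mmap_sem annotation */
        puts("annotating possible fault");
}

int main(void)
{
        might_fault_demo();     /* annotates                        */
        fs = KERNEL_DS;         /* like set_fs(KERNEL_DS) in sunrpc */
        might_fault_demo();     /* silent: no false positive        */
        fs = USER_DS;
        return 0;
}

]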
diff --git a/mm/mempolicy.c b/mm/mempolicy.c
index e412ffa8e52e..3eb4a6fdc043 100644
--- a/mm/mempolicy.c
+++ b/mm/mempolicy.c
@@ -1068,10 +1068,9 @@ static int copy_nodes_to_user(unsigned long __user *mask, unsigned long maxnode,
return copy_to_user(mask, nodes_addr(*nodes), copy) ? -EFAULT : 0;
}
-asmlinkage long sys_mbind(unsigned long start, unsigned long len,
- unsigned long mode,
- unsigned long __user *nmask, unsigned long maxnode,
- unsigned flags)
+SYSCALL_DEFINE6(mbind, unsigned long, start, unsigned long, len,
+ unsigned long, mode, unsigned long __user *, nmask,
+ unsigned long, maxnode, unsigned, flags)
{
nodemask_t nodes;
int err;
@@ -1091,8 +1090,8 @@ asmlinkage long sys_mbind(unsigned long start, unsigned long len,
}
/* Set the process memory policy */
-asmlinkage long sys_set_mempolicy(int mode, unsigned long __user *nmask,
- unsigned long maxnode)
+SYSCALL_DEFINE3(set_mempolicy, int, mode, unsigned long __user *, nmask,
+ unsigned long, maxnode)
{
int err;
nodemask_t nodes;
@@ -1110,9 +1109,9 @@ asmlinkage long sys_set_mempolicy(int mode, unsigned long __user *nmask,
return do_set_mempolicy(mode, flags, &nodes);
}
-asmlinkage long sys_migrate_pages(pid_t pid, unsigned long maxnode,
- const unsigned long __user *old_nodes,
- const unsigned long __user *new_nodes)
+SYSCALL_DEFINE4(migrate_pages, pid_t, pid, unsigned long, maxnode,
+ const unsigned long __user *, old_nodes,
+ const unsigned long __user *, new_nodes)
{
const struct cred *cred = current_cred(), *tcred;
struct mm_struct *mm;
@@ -1185,10 +1184,9 @@ out:
/* Retrieve NUMA policy */
-asmlinkage long sys_get_mempolicy(int __user *policy,
- unsigned long __user *nmask,
- unsigned long maxnode,
- unsigned long addr, unsigned long flags)
+SYSCALL_DEFINE5(get_mempolicy, int __user *, policy,
+ unsigned long __user *, nmask, unsigned long, maxnode,
+ unsigned long, addr, unsigned long, flags)
{
int err;
int uninitialized_var(pval);
diff --git a/mm/migrate.c b/mm/migrate.c
index a30ea5fcf9f1..2bb4e1d63520 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -1055,10 +1055,10 @@ out:
* Move a list of pages in the address space of the currently executing
* process.
*/
-asmlinkage long sys_move_pages(pid_t pid, unsigned long nr_pages,
- const void __user * __user *pages,
- const int __user *nodes,
- int __user *status, int flags)
+SYSCALL_DEFINE6(move_pages, pid_t, pid, unsigned long, nr_pages,
+ const void __user * __user *, pages,
+ const int __user *, nodes,
+ int __user *, status, int, flags)
{
const struct cred *cred = current_cred(), *tcred;
struct task_struct *task;
diff --git a/mm/mincore.c b/mm/mincore.c
index 5178800bc129..8cb508f84ea4 100644
--- a/mm/mincore.c
+++ b/mm/mincore.c
@@ -177,8 +177,8 @@ none_mapped:
* mapped
* -EAGAIN - A kernel resource was temporarily unavailable.
*/
-asmlinkage long sys_mincore(unsigned long start, size_t len,
- unsigned char __user * vec)
+SYSCALL_DEFINE3(mincore, unsigned long, start, size_t, len,
+ unsigned char __user *, vec)
{
long retval;
unsigned long pages;
diff --git a/mm/mlock.c b/mm/mlock.c
index e125156c664e..2904a347e476 100644
--- a/mm/mlock.c
+++ b/mm/mlock.c
@@ -530,7 +530,7 @@ static int do_mlock(unsigned long start, size_t len, int on)
return error;
}
-asmlinkage long sys_mlock(unsigned long start, size_t len)
+SYSCALL_DEFINE2(mlock, unsigned long, start, size_t, len)
{
unsigned long locked;
unsigned long lock_limit;
@@ -558,7 +558,7 @@ asmlinkage long sys_mlock(unsigned long start, size_t len)
return error;
}
-asmlinkage long sys_munlock(unsigned long start, size_t len)
+SYSCALL_DEFINE2(munlock, unsigned long, start, size_t, len)
{
int ret;
@@ -595,7 +595,7 @@ out:
return 0;
}
-asmlinkage long sys_mlockall(int flags)
+SYSCALL_DEFINE1(mlockall, int, flags)
{
unsigned long lock_limit;
int ret = -EINVAL;
@@ -623,7 +623,7 @@ out:
return ret;
}
-asmlinkage long sys_munlockall(void)
+SYSCALL_DEFINE0(munlockall)
{
int ret;
diff --git a/mm/mmap.c b/mm/mmap.c
index 749623196cb9..8d95902e9a38 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -245,7 +245,7 @@ static struct vm_area_struct *remove_vma(struct vm_area_struct *vma)
return next;
}
-asmlinkage unsigned long sys_brk(unsigned long brk)
+SYSCALL_DEFINE1(brk, unsigned long, brk)
{
unsigned long rlim, retval;
unsigned long newbrk, oldbrk;
@@ -1948,7 +1948,7 @@ int do_munmap(struct mm_struct *mm, unsigned long start, size_t len)
EXPORT_SYMBOL(do_munmap);
-asmlinkage long sys_munmap(unsigned long addr, size_t len)
+SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
{
int ret;
struct mm_struct *mm = current->mm;
diff --git a/mm/mprotect.c b/mm/mprotect.c
index d0f6e7ce09f1..abe2694e13f4 100644
--- a/mm/mprotect.c
+++ b/mm/mprotect.c
@@ -217,8 +217,8 @@ fail:
return error;
}
-asmlinkage long
-sys_mprotect(unsigned long start, size_t len, unsigned long prot)
+SYSCALL_DEFINE3(mprotect, unsigned long, start, size_t, len,
+ unsigned long, prot)
{
unsigned long vm_flags, nstart, end, tmp, reqprot;
struct vm_area_struct *vma, *prev;
diff --git a/mm/mremap.c b/mm/mremap.c
index 646de959aa58..a39b7b91be46 100644
--- a/mm/mremap.c
+++ b/mm/mremap.c
@@ -420,9 +420,9 @@ out_nc:
return ret;
}
-asmlinkage unsigned long sys_mremap(unsigned long addr,
- unsigned long old_len, unsigned long new_len,
- unsigned long flags, unsigned long new_addr)
+SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
+ unsigned long, new_len, unsigned long, flags,
+ unsigned long, new_addr)
{
unsigned long ret;
diff --git a/mm/msync.c b/mm/msync.c
index 07dae08cf31c..4083209b7f02 100644
--- a/mm/msync.c
+++ b/mm/msync.c
@@ -28,7 +28,7 @@
* So by _not_ starting I/O in MS_ASYNC we provide complete flexibility to
* applications.
*/
-asmlinkage long sys_msync(unsigned long start, size_t len, int flags)
+SYSCALL_DEFINE3(msync, unsigned long, start, size_t, len, int, flags)
{
unsigned long end;
struct mm_struct *mm = current->mm;
diff --git a/mm/nommu.c b/mm/nommu.c
index 60ed8375c986..8cee8c8ff0f2 100644
--- a/mm/nommu.c
+++ b/mm/nommu.c
@@ -416,7 +416,7 @@ EXPORT_SYMBOL(vm_insert_page);
* to a regular file. in this case, the unmapping will need
* to invoke file system routines that need the global lock.
*/
-asmlinkage unsigned long sys_brk(unsigned long brk)
+SYSCALL_DEFINE1(brk, unsigned long, brk)
{
struct mm_struct *mm = current->mm;
@@ -1573,7 +1573,7 @@ erase_whole_vma:
}
EXPORT_SYMBOL(do_munmap);
-asmlinkage long sys_munmap(unsigned long addr, size_t len)
+SYSCALL_DEFINE2(munmap, unsigned long, addr, size_t, len)
{
int ret;
struct mm_struct *mm = current->mm;
@@ -1657,10 +1657,9 @@ unsigned long do_mremap(unsigned long addr,
}
EXPORT_SYMBOL(do_mremap);
-asmlinkage
-unsigned long sys_mremap(unsigned long addr,
- unsigned long old_len, unsigned long new_len,
- unsigned long flags, unsigned long new_addr)
+SYSCALL_DEFINE5(mremap, unsigned long, addr, unsigned long, old_len,
+ unsigned long, new_len, unsigned long, flags,
+ unsigned long, new_addr)
{
unsigned long ret;
diff --git a/mm/swapfile.c b/mm/swapfile.c
index da422c47e2ee..f48b831e5e5c 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1377,7 +1377,7 @@ out:
return ret;
}
-asmlinkage long sys_swapoff(const char __user * specialfile)
+SYSCALL_DEFINE1(swapoff, const char __user *, specialfile)
{
struct swap_info_struct * p = NULL;
unsigned short *swap_map;
@@ -1633,7 +1633,7 @@ late_initcall(max_swapfiles_check);
*
* The swapon system call
*/
-asmlinkage long sys_swapon(const char __user * specialfile, int swap_flags)
+SYSCALL_DEFINE2(swapon, const char __user *, specialfile, int, swap_flags)
{
struct swap_info_struct * p;
char *name = NULL;
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index c5db9a7264d9..75f49d312e8c 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -14,7 +14,6 @@
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
-#include <linux/mutex.h>
#include <linux/interrupt.h>
#include <linux/proc_fs.h>
#include <linux/seq_file.h>
@@ -24,6 +23,7 @@
#include <linux/rbtree.h>
#include <linux/radix-tree.h>
#include <linux/rcupdate.h>
+#include <linux/bootmem.h>
#include <asm/atomic.h>
#include <asm/uaccess.h>
@@ -495,7 +495,7 @@ static atomic_t vmap_lazy_nr = ATOMIC_INIT(0);
static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
int sync, int force_flush)
{
- static DEFINE_MUTEX(purge_lock);
+ static DEFINE_SPINLOCK(purge_lock);
LIST_HEAD(valist);
struct vmap_area *va;
int nr = 0;
@@ -506,10 +506,10 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
* the case that isn't actually used at the moment anyway.
*/
if (!sync && !force_flush) {
- if (!mutex_trylock(&purge_lock))
+ if (!spin_trylock(&purge_lock))
return;
} else
- mutex_lock(&purge_lock);
+ spin_lock(&purge_lock);
rcu_read_lock();
list_for_each_entry_rcu(va, &vmap_area_list, list) {
@@ -541,7 +541,7 @@ static void __purge_vmap_area_lazy(unsigned long *start, unsigned long *end,
__free_vmap_area(va);
spin_unlock(&vmap_area_lock);
}
- mutex_unlock(&purge_lock);
+ spin_unlock(&purge_lock);
}
/*
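
[Note: with purge_lock now a spinlock, the fast path keeps its trylock-or-skip shape: a caller that needs neither sync nor a forced flush simply returns when a purge is already in flight rather than queueing behind it. That pattern, sketched with pthreads (illustrative only):

#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t purge_lock = PTHREAD_MUTEX_INITIALIZER;

static void purge(int sync, int force_flush)
{
        if (!sync && !force_flush) {
                /* opportunistic: piggyback on a purge in flight */
                if (pthread_mutex_trylock(&purge_lock) != 0)
                        return;
        } else {
                pthread_mutex_lock(&purge_lock);
        }

        puts("purging lazily freed areas");
        pthread_mutex_unlock(&purge_lock);
}

int main(void)
{
        purge(0, 0);    /* best-effort caller              */
        purge(1, 0);    /* caller that must wait and flush */
        return 0;
}

]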
@@ -984,6 +984,8 @@ EXPORT_SYMBOL(vm_map_ram);
void __init vmalloc_init(void)
{
+ struct vmap_area *va;
+ struct vm_struct *tmp;
int i;
for_each_possible_cpu(i) {
@@ -996,6 +998,14 @@ void __init vmalloc_init(void)
vbq->nr_dirty = 0;
}
+ /* Import existing vmlist entries. */
+ for (tmp = vmlist; tmp; tmp = tmp->next) {
+ va = alloc_bootmem(sizeof(struct vmap_area));
+ va->flags = tmp->flags | VM_VM_AREA;
+ va->va_start = (unsigned long)tmp->addr;
+ va->va_end = va->va_start + tmp->size;
+ __insert_vmap_area(va);
+ }
vmap_initialized = true;
}