author	Linus Torvalds <torvalds@linux-foundation.org>	2017-09-06 20:49:49 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2017-09-06 20:49:49 -0700
commit	d34fc1adf01ff87026da85fb972dc259dc347540 (patch)
tree	27356073d423187157b7cdb69da32b53102fb9e7	/fs/hugetlbfs/inode.c
parent	1c9fe4409ce3e9c78b1ed96ee8ed699d4f03bf33 (diff)
parent	d2cd9ede6e193dd7d88b6d27399e96229a551b19 (diff)
Merge branch 'akpm' (patches from Andrew)
Merge updates from Andrew Morton:

 - various misc bits
 - DAX updates
 - OCFS2
 - most of MM

* emailed patches from Andrew Morton <akpm@linux-foundation.org>: (119 commits)
  mm,fork: introduce MADV_WIPEONFORK
  x86,mpx: make mpx depend on x86-64 to free up VMA flag
  mm: add /proc/pid/smaps_rollup
  mm: hugetlb: clear target sub-page last when clearing huge page
  mm: oom: let oom_reap_task and exit_mmap run concurrently
  swap: choose swap device according to numa node
  mm: replace TIF_MEMDIE checks by tsk_is_oom_victim
  mm, oom: do not rely on TIF_MEMDIE for memory reserves access
  z3fold: use per-cpu unbuddied lists
  mm, swap: don't use VMA based swap readahead if HDD is used as swap
  mm, swap: add sysfs interface for VMA based swap readahead
  mm, swap: VMA based swap readahead
  mm, swap: fix swap readahead marking
  mm, swap: add swap readahead hit statistics
  mm/vmalloc.c: don't reinvent the wheel but use existing llist API
  mm/vmstat.c: fix wrong comment
  selftests/memfd: add memfd_create hugetlbfs selftest
  mm/shmem: add hugetlbfs support to memfd_create()
  mm, devm_memremap_pages: use multi-order radix for ZONE_DEVICE lookups
  mm/vmalloc.c: halve the number of comparisons performed in pcpu_get_vm_areas()
  ...
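One headline item above is MADV_WIPEONFORK. As a rough illustration of its intended semantics, here is a minimal userspace sketch (hypothetical, not part of the patch series; it assumes a kernel carrying this merge and defines the UAPI value itself in case the installed headers predate it): a private anonymous mapping marked with the flag reads back as zeroes in the child after fork(), while the parent's contents are untouched.

/*
 * Hypothetical illustration only -- not from the patch series.
 * On kernels without this merge, the madvise() call fails with EINVAL.
 */
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/wait.h>
#include <unistd.h>

#ifndef MADV_WIPEONFORK
#define MADV_WIPEONFORK 18	/* UAPI value from asm-generic/mman-common.h */
#endif

int main(void)
{
	size_t len = 4096;
	char *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED)
		return 1;

	strcpy(p, "secret");
	if (madvise(p, len, MADV_WIPEONFORK))
		perror("madvise");		/* e.g. pre-4.14 kernel */

	if (fork() == 0) {
		printf("child sees:  \"%s\"\n", p);	/* "" - page was wiped */
		_exit(0);
	}
	wait(NULL);
	printf("parent sees: \"%s\"\n", p);		/* "secret" - unchanged */
	munmap(p, len);
	return 0;
}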
Diffstat (limited to 'fs/hugetlbfs/inode.c')
-rw-r--r--	fs/hugetlbfs/inode.c	| 30
1 file changed, 7 insertions(+), 23 deletions(-)
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 28d2753be094..7c02b3f738e1 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -401,9 +401,8 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
 	const pgoff_t end = lend >> huge_page_shift(h);
 	struct vm_area_struct pseudo_vma;
 	struct pagevec pvec;
-	pgoff_t next;
+	pgoff_t next, index;
 	int i, freed = 0;
-	long lookup_nr = PAGEVEC_SIZE;
 	bool truncate_op = (lend == LLONG_MAX);
 
 	memset(&pseudo_vma, 0, sizeof(struct vm_area_struct));
@@ -412,33 +411,19 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
 	next = start;
 	while (next < end) {
 		/*
-		 * Don't grab more pages than the number left in the range.
-		 */
-		if (end - next < lookup_nr)
-			lookup_nr = end - next;
-
-		/*
 		 * When no more pages are found, we are done.
 		 */
-		if (!pagevec_lookup(&pvec, mapping, next, lookup_nr))
+		if (!pagevec_lookup_range(&pvec, mapping, &next, end - 1))
 			break;
 
 		for (i = 0; i < pagevec_count(&pvec); ++i) {
 			struct page *page = pvec.pages[i];
 			u32 hash;
 
-			/*
-			 * The page (index) could be beyond end. This is
-			 * only possible in the punch hole case as end is
-			 * max page offset in the truncate case.
-			 */
-			next = page->index;
-			if (next >= end)
-				break;
-
+			index = page->index;
 			hash = hugetlb_fault_mutex_hash(h, current->mm,
 							&pseudo_vma,
-							mapping, next, 0);
+							mapping, index, 0);
 			mutex_lock(&hugetlb_fault_mutex_table[hash]);
 
 			/*
@@ -455,8 +440,8 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
 
 				i_mmap_lock_write(mapping);
 				hugetlb_vmdelete_list(&mapping->i_mmap,
-					next * pages_per_huge_page(h),
-					(next + 1) * pages_per_huge_page(h));
+					index * pages_per_huge_page(h),
+					(index + 1) * pages_per_huge_page(h));
 				i_mmap_unlock_write(mapping);
 			}
 
@@ -475,14 +460,13 @@ static void remove_inode_hugepages(struct inode *inode, loff_t lstart,
 			freed++;
 			if (!truncate_op) {
 				if (unlikely(hugetlb_unreserve_pages(inode,
-							next, next + 1, 1)))
+							index, index + 1, 1)))
 					hugetlb_fix_reserve_counts(inode);
 			}
 
 			unlock_page(page);
 			mutex_unlock(&hugetlb_fault_mutex_table[hash]);
 		}
-		++next;
 		huge_pagevec_release(&pvec);
 		cond_resched();
 	}
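For readers unfamiliar with the helper this diff converts to: pagevec_lookup_range() takes the cursor by reference and an inclusive end index, so the caller no longer needs to clamp its own lookup count or re-check page->index against the range end. A simplified sketch of the resulting loop shape, under the 4.14-era pagevec API (walk_range() and its per-page work are hypothetical, not code from this file):

/*
 * Simplified sketch; only the pagevec calls mirror the API used by
 * the patch above, the rest is an illustrative placeholder.
 */
#include <linux/pagemap.h>
#include <linux/pagevec.h>
#include <linux/sched.h>

static void walk_range(struct address_space *mapping, pgoff_t start, pgoff_t end)
{
	struct pagevec pvec;
	pgoff_t next = start;
	int i;

	pagevec_init(&pvec, 0);
	while (next < end) {
		/*
		 * pagevec_lookup_range() clamps the lookup to 'end - 1'
		 * (inclusive) and advances 'next' past the last page it
		 * returned, so no private lookup count and no explicit
		 * page->index >= end check are needed.
		 */
		if (!pagevec_lookup_range(&pvec, mapping, &next, end - 1))
			break;

		for (i = 0; i < pagevec_count(&pvec); i++) {
			struct page *page = pvec.pages[i];

			lock_page(page);
			/* ... per-page work; page->index is always < end ... */
			unlock_page(page);
		}
		pagevec_release(&pvec);
		cond_resched();
	}
}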