author     Hugh Dickins <hughd@google.com>                  2011-08-03 16:21:27 -0700
committer  Linus Torvalds <torvalds@linux-foundation.org>   2011-08-03 14:25:24 -1000
commit     e504f3fdd63d486d45b18009e5a65f2e329acb0a
tree       2d02a5c29a922fae626a69cd0fc92cae37d7918e /mm
parent     31475dd611209413bace21651a400afb91d0bd9d
tmpfs radix_tree: locate_item to speed up swapoff
We have already acknowledged that swapoff of a tmpfs file is slower than
it was before conversion to the generic radix_tree: a little slower
there will be acceptable, if the hotter paths are faster.

But it was a shock to find swapoff of a 500MB file 20 times slower on my
laptop, taking 10 minutes; and at that rate it significantly slows down
my testing.

Now, most of that turned out to be overhead from PROVE_LOCKING and
PROVE_RCU: without those it was only 4 times slower than before; and
more realistic tests on other machines don't fare as badly.

I've tried a number of things to improve it, including tagging the swap
entries, then doing lookup by tag: I'd expected that to halve the time,
but in practice it's erratic, and often counter-productive.

The only change I've so far found to make a consistent improvement is to
short-circuit the way we go back and forth: gang lookup packs entries
into the array supplied, then shmem scans that array for the target
entry.  Scanning in place doubles the speed, so it's now only twice as
slow as before (or three times slower when the PROVEs are on).

So, add radix_tree_locate_item() as an expedient, once-off,
single-caller hack to do the lookup directly in place.  #ifdef it on
CONFIG_SHMEM and CONFIG_SWAP, as much to document its limited
applicability as to save space in other configurations.  And, sadly,
#include sched.h for cond_resched().

Signed-off-by: Hugh Dickins <hughd@google.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
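The radix_tree_locate_item() implementation itself lands in lib/radix-tree.c,
which this diff (limited to 'mm') does not show.  As a rough illustration of
"lookup directly in place", walking the tree's own slots instead of
gang-copying batches into an array and rescanning them, here is a minimal
userspace sketch.  Every name in it (node, MAP_SHIFT, locate) is hypothetical;
the real kernel walk is iterative rather than recursive, runs under
rcu_read_lock(), and, per the message above, needs sched.h for cond_resched().

/*
 * Toy model only, not lib/radix-tree.c: an in-place search of a
 * radix tree for a matching entry, returning its index or -1.
 */
#include <stdio.h>
#include <stdlib.h>

#define MAP_SHIFT	6			/* 64 slots per node */
#define MAP_SIZE	(1UL << MAP_SHIFT)

struct node {
	void *slots[MAP_SIZE];		/* children, or items at height 1 */
};

static long locate(struct node *node, unsigned int height,
		   unsigned long first, void *item)
{
	/* Number of indices covered by each slot at this height. */
	unsigned long span = 1UL << ((height - 1) * MAP_SHIFT);
	unsigned long i;

	for (i = 0; i < MAP_SIZE; i++) {
		void *entry = node->slots[i];

		if (!entry)
			continue;
		if (height == 1) {
			/* Compare in place: no copy-out, no rescan. */
			if (entry == item)
				return (long)(first + i);
		} else {
			long found = locate(entry, height - 1,
					    first + i * span, item);
			if (found >= 0)
				return found;
		}
	}
	return -1;
}

int main(void)
{
	struct node *root = calloc(1, sizeof(*root));
	struct node *leaf = calloc(1, sizeof(*leaf));
	int item;

	root->slots[2] = leaf;		/* height 2: index = 2 * 64 + 5 */
	leaf->slots[5] = &item;
	printf("found at %ld\n", locate(root, 2, 0, &item));	/* prints 133 */
	free(leaf);
	free(root);
	return 0;
}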
Diffstat (limited to 'mm')
-rw-r--r--  mm/shmem.c | 38 +-
1 file changed, 1 insertion(+), 37 deletions(-)
diff --git a/mm/shmem.c b/mm/shmem.c
index 3a5be0feb6af..1c702f6f1241 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -357,42 +357,6 @@ export:
 }
 
 /*
- * Lockless lookup of swap entry in radix tree, avoiding refcount on pages.
- */
-static pgoff_t shmem_find_swap(struct address_space *mapping, void *radswap)
-{
-	void **slots[PAGEVEC_SIZE];
-	pgoff_t indices[PAGEVEC_SIZE];
-	unsigned int nr_found;
-
-restart:
-	nr_found = 1;
-	indices[0] = -1;
-	while (nr_found) {
-		pgoff_t index = indices[nr_found - 1] + 1;
-		unsigned int i;
-
-		rcu_read_lock();
-		nr_found = radix_tree_gang_lookup_slot(&mapping->page_tree,
-					slots, indices, index, PAGEVEC_SIZE);
-		for (i = 0; i < nr_found; i++) {
-			void *item = radix_tree_deref_slot(slots[i]);
-			if (radix_tree_deref_retry(item)) {
-				rcu_read_unlock();
-				goto restart;
-			}
-			if (item == radswap) {
-				rcu_read_unlock();
-				return indices[i];
-			}
-		}
-		rcu_read_unlock();
-		cond_resched();
-	}
-	return -1;
-}
-
-/*
  * Remove swap entry from radix tree, free the swap and its page cache.
  */
 static int shmem_free_swap(struct address_space *mapping,
@@ -612,7 +576,7 @@ static int shmem_unuse_inode(struct shmem_inode_info *info,
 	int error;
 
 	radswap = swp_to_radix_entry(swap);
-	index = shmem_find_swap(mapping, radswap);
+	index = radix_tree_locate_item(&mapping->page_tree, radswap);
 	if (index == -1)
 		return 0;
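
For reference, the declaration this call site implies.  The
include/linux/radix-tree.h hunk falls outside this 'mm'-limited diffstat, so
the prototype below is inferred from the caller and from the #ifdef described
in the commit message, not quoted from the patch:

/* Inferred, not quoted: the lib/ and include/ hunks are not shown here. */
#if defined(CONFIG_SHMEM) && defined(CONFIG_SWAP)
unsigned long radix_tree_locate_item(struct radix_tree_root *root, void *item);
#endif

The caller stores the result in a pgoff_t and tests it against -1, so
(unsigned long)-1 serving as the not-found value is consistent with the hunk
above.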