author		Yang Yang <yang.yang29@zte.com.cn>	2023-01-18 20:13:03 +0800
committer	Andrew Morton <akpm@linux-foundation.org>	2023-02-02 22:33:24 -0800
commit		5649d113ffce9f532a9ecc5ab96a93e02efbf283 (patch)
tree		a7e70d5e0d07a303b61b5e392d6a2157a18c4051 /mm/workingset.c
parent		04bac040bc71b4b37550eed5854f34ca161756f9 (diff)
swap_state: update shadow_nodes for anonymous page
shadow_nodes tracks the shadow nodes that workingset shadow-node reclaim operates on. It has only been updated on page cache add and delete, because for a long time workingset detection supported only the page cache. When workingset detection was extended to anonymous pages, we missed updating shadow_nodes for them. As a result, shadow nodes of anonymous pages are never reclaimed by scan_shadow_nodes(), even when they consume a lot of memory and system memory is tight.

Fix this by updating shadow_nodes for anonymous pages when swap cache entries are added or deleted, by calling xas_set_update(..workingset_update_node).

Link: https://lkml.kernel.org/r/202301182013032211005@zte.com.cn
Fixes: aae466b0052e ("mm/swap: implement workingset detection for anonymous LRU")
Signed-off-by: Yang Yang <yang.yang29@zte.com.cn>
Reviewed-by: Ran Xiaokai <ran.xiaokai@zte.com.cn>
Cc: Bagas Sanjaya <bagasdotme@gmail.com>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Matthew Wilcox <willy@infradead.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
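The swap cache side of the change is outside this diffstat, which is limited to mm/workingset.c. As a rough sketch only of what the message describes, and assuming the usual add_to_swap_cache() structure in mm/swap_state.c, the swap cache add and delete paths would register workingset_update_node() as the XArray node-update callback so that shadow nodes holding anonymous shadow entries land on the shadow_nodes list_lru:

/* Sketch only: abridged illustration, not the exact mm/swap_state.c source. */
int add_to_swap_cache(struct folio *folio, swp_entry_t entry,
		      gfp_t gfp, void **shadowp)
{
	struct address_space *address_space = swap_address_space(entry);
	pgoff_t idx = swp_offset(entry);
	XA_STATE_ORDER(xas, &address_space->i_pages, idx, folio_order(folio));

	/*
	 * Have the XArray call workingset_update_node() whenever a node
	 * gains or loses shadow entries, so anonymous shadows are tracked
	 * on the shadow_nodes list_lru just like page cache shadows.
	 */
	xas_set_update(&xas, workingset_update_node);

	/* ... insert the folio under xas as before ... */
	return 0;
}

The delete path (__delete_from_swap_cache()) would get the same xas_set_update() call on its XA_STATE, so node shrinkage on removal is accounted as well.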
Diffstat (limited to 'mm/workingset.c')
-rw-r--r--	mm/workingset.c	21
1 files changed, 13 insertions(+), 8 deletions(-)
diff --git a/mm/workingset.c b/mm/workingset.c
index f194d13beabb..00c6f4d9d9be 100644
--- a/mm/workingset.c
+++ b/mm/workingset.c
@@ -657,11 +657,14 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
 		goto out;
 	}
 
-	if (!spin_trylock(&mapping->host->i_lock)) {
-		xa_unlock(&mapping->i_pages);
-		spin_unlock_irq(lru_lock);
-		ret = LRU_RETRY;
-		goto out;
+	/* For page cache we need to hold i_lock */
+	if (mapping->host != NULL) {
+		if (!spin_trylock(&mapping->host->i_lock)) {
+			xa_unlock(&mapping->i_pages);
+			spin_unlock_irq(lru_lock);
+			ret = LRU_RETRY;
+			goto out;
+		}
 	}
 
 	list_lru_isolate(lru, item);
@@ -683,9 +686,11 @@ static enum lru_status shadow_lru_isolate(struct list_head *item,
 
 out_invalid:
 	xa_unlock_irq(&mapping->i_pages);
-	if (mapping_shrinkable(mapping))
-		inode_add_lru(mapping->host);
-	spin_unlock(&mapping->host->i_lock);
+	if (mapping->host != NULL) {
+		if (mapping_shrinkable(mapping))
+			inode_add_lru(mapping->host);
+		spin_unlock(&mapping->host->i_lock);
+	}
 	ret = LRU_REMOVED_RETRY;
 out:
 	cond_resched();
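The mapping->host != NULL test distinguishes the two users of shadow entries: page cache mappings are backed by an inode, while swap cache mappings are the per-swap-type swapper spaces, which never have a host inode assigned. A rough, abridged sketch of init_swap_address_space() in mm/swap_state.c illustrates this (details vary by kernel version; treat it as illustration, not the exact source):

int init_swap_address_space(unsigned int type, unsigned long nr_pages)
{
	struct address_space *spaces, *space;
	unsigned int i, nr;

	nr = DIV_ROUND_UP(nr_pages, SWAP_ADDRESS_SPACE_PAGES);
	spaces = kvcalloc(nr, sizeof(struct address_space), GFP_KERNEL);
	if (!spaces)
		return -ENOMEM;
	for (i = 0; i < nr; i++) {
		space = spaces + i;
		xa_init_flags(&space->i_pages, XA_FLAGS_LOCK_IRQ);
		atomic_set(&space->i_mmap_writable, 0);
		space->a_ops = &swap_aops;
		/* space->host is never set, so it stays NULL for swap cache;
		 * shadow_lru_isolate() therefore must not touch i_lock here. */
	}
	nr_swapper_spaces[type] = nr;
	swapper_spaces[type] = spaces;

	return 0;
}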