author		Mel Gorman <mgorman@techsingularity.net>	2016-07-28 15:45:28 -0700
committer	Linus Torvalds <torvalds@linux-foundation.org>	2016-07-28 16:07:41 -0700
commit		a52633d8e9c35832f1409dc5fa166019048a3f1f (patch)
tree		489be85b88b8dc0749747d603448bb3669db0d14 /mm/swap.c
parent		75ef7184053989118d3814c558a9af62e7376a58 (diff)
mm, vmscan: move lru_lock to the node
Node-based reclaim requires node-based LRUs and locking.  This is a
preparation patch that just moves the lru_lock to the node so later patches
are easier to review.  It is a mechanical change but note this patch makes
contention worse because the LRU lock is hotter and direct reclaim and kswapd
can contend on the same lock even when reclaiming from different zones.

Link: http://lkml.kernel.org/r/1467970510-21195-3-git-send-email-mgorman@techsingularity.net
Signed-off-by: Mel Gorman <mgorman@techsingularity.net>
Reviewed-by: Minchan Kim <minchan@kernel.org>
Acked-by: Johannes Weiner <hannes@cmpxchg.org>
Acked-by: Vlastimil Babka <vbabka@suse.cz>
Cc: Hillf Danton <hillf.zj@alibaba-inc.com>
Cc: Joonsoo Kim <iamjoonsoo.kim@lge.com>
Cc: Michal Hocko <mhocko@kernel.org>
Cc: Rik van Riel <riel@surriel.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
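The zone_lru_lock() calls introduced below all resolve to a per-node lock rather than the per-zone one. The helper itself is added outside the mm/swap.c diff shown here; as a minimal sketch (assuming the lock is the lru_lock field moved into struct pglist_data and reached through the zone's zone_pgdat back-pointer), it amounts to:

static inline spinlock_t *zone_lru_lock(struct zone *zone)
{
	/* sketch: every zone of a node hands back the same node-level lock */
	return &zone->zone_pgdat->lru_lock;
}

Because all zones of a node return the same spinlock, direct reclaim and kswapd working on different zones of the same node now serialize on one lock, which is the contention cost noted in the message above.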
Diffstat (limited to 'mm/swap.c')
-rw-r--r--	mm/swap.c	30

1 file changed, 15 insertions(+), 15 deletions(-)
diff --git a/mm/swap.c b/mm/swap.c
index 616df4ddd870..bf37e5cfae81 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -62,12 +62,12 @@ static void __page_cache_release(struct page *page)
 		struct lruvec *lruvec;
 		unsigned long flags;
 
-		spin_lock_irqsave(&zone->lru_lock, flags);
+		spin_lock_irqsave(zone_lru_lock(zone), flags);
 		lruvec = mem_cgroup_page_lruvec(page, zone);
 		VM_BUG_ON_PAGE(!PageLRU(page), page);
 		__ClearPageLRU(page);
 		del_page_from_lru_list(page, lruvec, page_off_lru(page));
-		spin_unlock_irqrestore(&zone->lru_lock, flags);
+		spin_unlock_irqrestore(zone_lru_lock(zone), flags);
 	}
 	mem_cgroup_uncharge(page);
 }
@@ -189,16 +189,16 @@ static void pagevec_lru_move_fn(struct pagevec *pvec,
 
 		if (pagezone != zone) {
 			if (zone)
-				spin_unlock_irqrestore(&zone->lru_lock, flags);
+				spin_unlock_irqrestore(zone_lru_lock(zone), flags);
 			zone = pagezone;
-			spin_lock_irqsave(&zone->lru_lock, flags);
+			spin_lock_irqsave(zone_lru_lock(zone), flags);
 		}
 
 		lruvec = mem_cgroup_page_lruvec(page, zone);
 		(*move_fn)(page, lruvec, arg);
 	}
 	if (zone)
-		spin_unlock_irqrestore(&zone->lru_lock, flags);
+		spin_unlock_irqrestore(zone_lru_lock(zone), flags);
 	release_pages(pvec->pages, pvec->nr, pvec->cold);
 	pagevec_reinit(pvec);
 }
@@ -318,9 +318,9 @@ void activate_page(struct page *page)
 	struct zone *zone = page_zone(page);
 
 	page = compound_head(page);
-	spin_lock_irq(&zone->lru_lock);
+	spin_lock_irq(zone_lru_lock(zone));
 	__activate_page(page, mem_cgroup_page_lruvec(page, zone), NULL);
-	spin_unlock_irq(&zone->lru_lock);
+	spin_unlock_irq(zone_lru_lock(zone));
 }
 #endif
 
@@ -448,13 +448,13 @@ void add_page_to_unevictable_list(struct page *page)
 	struct zone *zone = page_zone(page);
 	struct lruvec *lruvec;
 
-	spin_lock_irq(&zone->lru_lock);
+	spin_lock_irq(zone_lru_lock(zone));
 	lruvec = mem_cgroup_page_lruvec(page, zone);
 	ClearPageActive(page);
 	SetPageUnevictable(page);
 	SetPageLRU(page);
 	add_page_to_lru_list(page, lruvec, LRU_UNEVICTABLE);
-	spin_unlock_irq(&zone->lru_lock);
+	spin_unlock_irq(zone_lru_lock(zone));
 }
 
 /**
@@ -744,7 +744,7 @@ void release_pages(struct page **pages, int nr, bool cold)
 		 * same zone. The lock is held only if zone != NULL.
 		 */
 		if (zone && ++lock_batch == SWAP_CLUSTER_MAX) {
-			spin_unlock_irqrestore(&zone->lru_lock, flags);
+			spin_unlock_irqrestore(zone_lru_lock(zone), flags);
 			zone = NULL;
 		}
 
@@ -759,7 +759,7 @@ void release_pages(struct page **pages, int nr, bool cold)
 
 		if (PageCompound(page)) {
 			if (zone) {
-				spin_unlock_irqrestore(&zone->lru_lock, flags);
+				spin_unlock_irqrestore(zone_lru_lock(zone), flags);
 				zone = NULL;
 			}
 			__put_compound_page(page);
@@ -771,11 +771,11 @@ void release_pages(struct page **pages, int nr, bool cold)
 
 			if (pagezone != zone) {
 				if (zone)
-					spin_unlock_irqrestore(&zone->lru_lock,
+					spin_unlock_irqrestore(zone_lru_lock(zone),
 									flags);
 				lock_batch = 0;
 				zone = pagezone;
-				spin_lock_irqsave(&zone->lru_lock, flags);
+				spin_lock_irqsave(zone_lru_lock(zone), flags);
 			}
 
 			lruvec = mem_cgroup_page_lruvec(page, zone);
@@ -790,7 +790,7 @@ void release_pages(struct page **pages, int nr, bool cold)
 		list_add(&page->lru, &pages_to_free);
 	}
 	if (zone)
-		spin_unlock_irqrestore(&zone->lru_lock, flags);
+		spin_unlock_irqrestore(zone_lru_lock(zone), flags);
 
 	mem_cgroup_uncharge_list(&pages_to_free);
 	free_hot_cold_page_list(&pages_to_free, cold);
@@ -826,7 +826,7 @@ void lru_add_page_tail(struct page *page, struct page *page_tail,
 	VM_BUG_ON_PAGE(PageCompound(page_tail), page);
 	VM_BUG_ON_PAGE(PageLRU(page_tail), page);
 	VM_BUG_ON(NR_CPUS != 1 &&
-		  !spin_is_locked(&lruvec_zone(lruvec)->lru_lock));
+		  !spin_is_locked(zone_lru_lock(lruvec_zone(lruvec))));
 
 	if (!list)
 		SetPageLRU(page_tail);