Diffstat (limited to 'mm/swap.c')
 mm/swap.c | 76 +++++++++++++++++++++++++++++++----------------------------------
 1 file changed, 38 insertions(+), 38 deletions(-)
diff --git a/mm/swap.c b/mm/swap.c
index 616df4ddd870..75c63bb2a1da 100644
--- a/mm/swap.c
+++ b/mm/swap.c
@@ -62,12 +62,12 @@ static void __page_cache_release(struct page *page)
                 struct lruvec *lruvec;
                 unsigned long flags;

-                spin_lock_irqsave(&zone->lru_lock, flags);
-                lruvec = mem_cgroup_page_lruvec(page, zone);
+                spin_lock_irqsave(zone_lru_lock(zone), flags);
+                lruvec = mem_cgroup_page_lruvec(page, zone->zone_pgdat);
                 VM_BUG_ON_PAGE(!PageLRU(page), page);
                 __ClearPageLRU(page);
                 del_page_from_lru_list(page, lruvec, page_off_lru(page));
-                spin_unlock_irqrestore(&zone->lru_lock, flags);
+                spin_unlock_irqrestore(zone_lru_lock(zone), flags);
         }
         mem_cgroup_uncharge(page);
 }
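
The '+' lines above only compile because the series also introduces a zone_lru_lock() helper outside this file. As a point of reference, a minimal sketch of that helper, assuming it matches the definition added to include/linux/mmzone.h elsewhere in the series: the zone no longer owns an lru_lock of its own, so callers that still hold a struct zone are redirected to the lock of the node (pgdat) the zone belongs to.

/*
 * Sketch of the zone_lru_lock() helper assumed by the hunk above;
 * not part of this file. Redirects a zone-based caller to the
 * single per-node LRU lock.
 */
static inline spinlock_t *zone_lru_lock(struct zone *zone)
{
        return &zone->zone_pgdat->lru_lock;
}

Note that mem_cgroup_page_lruvec() now takes the pgdat rather than the zone, which is why both call sites in this function change together.
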
@@ -179,26 +179,26 @@ static void pagevec_lru_move_fn(struct pagevec *pvec,
         void *arg)
 {
         int i;
-        struct zone *zone = NULL;
+        struct pglist_data *pgdat = NULL;
         struct lruvec *lruvec;
         unsigned long flags = 0;

         for (i = 0; i < pagevec_count(pvec); i++) {
                 struct page *page = pvec->pages[i];
-                struct zone *pagezone = page_zone(page);
+                struct pglist_data *pagepgdat = page_pgdat(page);

-                if (pagezone != zone) {
-                        if (zone)
-                                spin_unlock_irqrestore(&zone->lru_lock, flags);
-                        zone = pagezone;
-                        spin_lock_irqsave(&zone->lru_lock, flags);
+                if (pagepgdat != pgdat) {
+                        if (pgdat)
+                                spin_unlock_irqrestore(&pgdat->lru_lock, flags);
+                        pgdat = pagepgdat;
+                        spin_lock_irqsave(&pgdat->lru_lock, flags);
                 }

-                lruvec = mem_cgroup_page_lruvec(page, zone);
+                lruvec = mem_cgroup_page_lruvec(page, pgdat);
                 (*move_fn)(page, lruvec, arg);
         }
-        if (zone)
-                spin_unlock_irqrestore(&zone->lru_lock, flags);
+        if (pgdat)
+                spin_unlock_irqrestore(&pgdat->lru_lock, flags);
         release_pages(pvec->pages, pvec->nr, pvec->cold);
         pagevec_reinit(pvec);
 }
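
pagevec_lru_move_fn() keeps one lock held across consecutive pages and only switches when a page belongs to a different lock domain than the previous one; with per-zone locks, pages from different zones of the same node forced a lock switch, whereas with one lock per pgdat those switches disappear. A self-contained userspace analogue of the pattern, using pthreads; owner, item, and move_batch are all illustrative names, not kernel API:

#include <pthread.h>
#include <stddef.h>

struct owner {                  /* stands in for struct pglist_data */
        pthread_mutex_t lock;   /* stands in for pgdat->lru_lock */
        long counter;
};

struct item {                   /* stands in for struct page */
        struct owner *owner;
};

/*
 * Process a batch under lazily-held per-owner locks, switching
 * locks only when consecutive items have different owners.
 */
static void move_batch(struct item **items, size_t n)
{
        struct owner *locked = NULL;
        size_t i;

        for (i = 0; i < n; i++) {
                struct owner *o = items[i]->owner;

                if (o != locked) {      /* owner changed: swap locks */
                        if (locked)
                                pthread_mutex_unlock(&locked->lock);
                        locked = o;
                        pthread_mutex_lock(&locked->lock);
                }
                locked->counter++;      /* the per-item work, under the lock */
        }
        if (locked)
                pthread_mutex_unlock(&locked->lock);
}
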
@@ -318,9 +318,9 @@ void activate_page(struct page *page)
         struct zone *zone = page_zone(page);

         page = compound_head(page);
-        spin_lock_irq(&zone->lru_lock);
-        __activate_page(page, mem_cgroup_page_lruvec(page, zone), NULL);
-        spin_unlock_irq(&zone->lru_lock);
+        spin_lock_irq(zone_lru_lock(zone));
+        __activate_page(page, mem_cgroup_page_lruvec(page, zone->zone_pgdat), NULL);
+        spin_unlock_irq(zone_lru_lock(zone));
 }
 #endif

@@ -445,16 +445,16 @@ void lru_cache_add(struct page *page)
  */
 void add_page_to_unevictable_list(struct page *page)
 {
-        struct zone *zone = page_zone(page);
+        struct pglist_data *pgdat = page_pgdat(page);
         struct lruvec *lruvec;

-        spin_lock_irq(&zone->lru_lock);
-        lruvec = mem_cgroup_page_lruvec(page, zone);
+        spin_lock_irq(&pgdat->lru_lock);
+        lruvec = mem_cgroup_page_lruvec(page, pgdat);
         ClearPageActive(page);
         SetPageUnevictable(page);
         SetPageLRU(page);
         add_page_to_lru_list(page, lruvec, LRU_UNEVICTABLE);
-        spin_unlock_irq(&zone->lru_lock);
+        spin_unlock_irq(&pgdat->lru_lock);
 }

 /**
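
The add_page_to_unevictable_list() hunk above no longer needs the zone at all: page_pgdat() maps a page straight to its node. A sketch of that lookup, assuming it matches the helper this series adds to include/linux/mm.h; NODE_DATA() and page_to_nid() are pre-existing kernel API:

/*
 * Sketch of page_pgdat(): resolve the page's NUMA node id and
 * return that node's pglist_data.
 */
static inline struct pglist_data *page_pgdat(const struct page *page)
{
        return NODE_DATA(page_to_nid(page));
}
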
@@ -730,7 +730,7 @@ void release_pages(struct page **pages, int nr, bool cold)
 {
         int i;
         LIST_HEAD(pages_to_free);
-        struct zone *zone = NULL;
+        struct pglist_data *locked_pgdat = NULL;
         struct lruvec *lruvec;
         unsigned long uninitialized_var(flags);
         unsigned int uninitialized_var(lock_batch);
@@ -741,11 +741,11 @@ void release_pages(struct page **pages, int nr, bool cold)
                 /*
                  * Make sure the IRQ-safe lock-holding time does not get
                  * excessive with a continuous string of pages from the
-                 * same zone. The lock is held only if zone != NULL.
+                 * same pgdat. The lock is held only if pgdat != NULL.
                  */
-                if (zone && ++lock_batch == SWAP_CLUSTER_MAX) {
-                        spin_unlock_irqrestore(&zone->lru_lock, flags);
-                        zone = NULL;
+                if (locked_pgdat && ++lock_batch == SWAP_CLUSTER_MAX) {
+                        spin_unlock_irqrestore(&locked_pgdat->lru_lock, flags);
+                        locked_pgdat = NULL;
                 }

                 if (is_huge_zero_page(page)) {
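
The relock ceiling above keeps a long run of same-node pages from holding the IRQ-disabled lock indefinitely: every SWAP_CLUSTER_MAX pages the lock is dropped and locked_pgdat is cleared, forcing the PageLRU branch below to retake it. Extending the earlier userspace sketch with that cap; BATCH_MAX is an illustrative stand-in for SWAP_CLUSTER_MAX:

#define BATCH_MAX 32    /* illustrative; SWAP_CLUSTER_MAX is 32 at this point */

/*
 * Like move_batch() above, but drop the lock every BATCH_MAX items
 * even when the owner never changes, so other contenders (in the
 * kernel: other CPUs, with interrupts re-enabled) can get in.
 */
static void release_batch(struct item **items, size_t n)
{
        struct owner *locked = NULL;
        unsigned int batch = 0;
        size_t i;

        for (i = 0; i < n; i++) {
                struct owner *o = items[i]->owner;

                if (locked && ++batch == BATCH_MAX) {
                        pthread_mutex_unlock(&locked->lock);
                        locked = NULL;  /* forces a fresh lock below */
                }
                if (o != locked) {
                        if (locked)
                                pthread_mutex_unlock(&locked->lock);
                        batch = 0;      /* new lock, new budget */
                        locked = o;
                        pthread_mutex_lock(&locked->lock);
                }
                locked->counter--;      /* the per-item work */
        }
        if (locked)
                pthread_mutex_unlock(&locked->lock);
}
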
@@ -758,27 +758,27 @@ void release_pages(struct page **pages, int nr, bool cold)
                         continue;

                 if (PageCompound(page)) {
-                        if (zone) {
-                                spin_unlock_irqrestore(&zone->lru_lock, flags);
-                                zone = NULL;
+                        if (locked_pgdat) {
+                                spin_unlock_irqrestore(&locked_pgdat->lru_lock, flags);
+                                locked_pgdat = NULL;
                         }
                         __put_compound_page(page);
                         continue;
                 }

                 if (PageLRU(page)) {
-                        struct zone *pagezone = page_zone(page);
+                        struct pglist_data *pgdat = page_pgdat(page);

-                        if (pagezone != zone) {
-                                if (zone)
-                                        spin_unlock_irqrestore(&zone->lru_lock,
+                        if (pgdat != locked_pgdat) {
+                                if (locked_pgdat)
+                                        spin_unlock_irqrestore(&locked_pgdat->lru_lock,
                                                                flags);
                                 lock_batch = 0;
-                                zone = pagezone;
-                                spin_lock_irqsave(&zone->lru_lock, flags);
+                                locked_pgdat = pgdat;
+                                spin_lock_irqsave(&locked_pgdat->lru_lock, flags);
                         }

-                        lruvec = mem_cgroup_page_lruvec(page, zone);
+                        lruvec = mem_cgroup_page_lruvec(page, locked_pgdat);
                         VM_BUG_ON_PAGE(!PageLRU(page), page);
                         __ClearPageLRU(page);
                         del_page_from_lru_list(page, lruvec, page_off_lru(page));
@@ -789,8 +789,8 @@ void release_pages(struct page **pages, int nr, bool cold)
                 list_add(&page->lru, &pages_to_free);
         }

-        if (zone)
-                spin_unlock_irqrestore(&zone->lru_lock, flags);
+        if (locked_pgdat)
+                spin_unlock_irqrestore(&locked_pgdat->lru_lock, flags);

         mem_cgroup_uncharge_list(&pages_to_free);
         free_hot_cold_page_list(&pages_to_free, cold);
@@ -826,7 +826,7 @@ void lru_add_page_tail(struct page *page, struct page *page_tail,
         VM_BUG_ON_PAGE(PageCompound(page_tail), page);
         VM_BUG_ON_PAGE(PageLRU(page_tail), page);
         VM_BUG_ON(NR_CPUS != 1 &&
-                  !spin_is_locked(&lruvec_zone(lruvec)->lru_lock));
+                  !spin_is_locked(&lruvec_pgdat(lruvec)->lru_lock));

         if (!list)
                 SetPageLRU(page_tail);
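
The assertion in lru_add_page_tail() now checks the node's lock via lruvec_pgdat() instead of lruvec_zone(). The NR_CPUS != 1 guard matters because on uniprocessor builds spinlocks compile away and spin_is_locked() always returns 0, so without it the VM_BUG_ON would fire even with the lock correctly held. A sketch of the idiom as a hypothetical wrapper macro; assert_lru_locked is not kernel API:

/*
 * Hypothetical wrapper for the open-coded assertion above: on SMP,
 * complain if the node's LRU lock is not held; on UP, where
 * spin_is_locked() is unconditionally false, the check degrades to
 * a no-op rather than a false positive.
 */
#define assert_lru_locked(lruvec) \
        VM_BUG_ON(NR_CPUS != 1 && \
                  !spin_is_locked(&lruvec_pgdat(lruvec)->lru_lock))
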