Diffstat (limited to 'mm/ksm.c')
-rw-r--r--  mm/ksm.c  313
1 file changed, 158 insertions(+), 155 deletions(-)
diff --git a/mm/ksm.c b/mm/ksm.c
index 8c001819cf10..34c4820e0d3d 100644
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -296,7 +296,7 @@ static bool ksm_use_zero_pages __read_mostly;
static bool ksm_smart_scan = true;
/* The number of zero pages which is placed by KSM */
-unsigned long ksm_zero_pages;
+atomic_long_t ksm_zero_pages = ATOMIC_LONG_INIT(0);
/* The number of pages that have been skipped due to "smart scanning" */
static unsigned long ksm_pages_skipped;
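The global counter becomes an atomic_long_t, presumably because it is also decremented from zero-page unmap paths that share no common lock with ksmd. Later hunks call ksm_map_zero_page() and mm_ksm_zero_pages(); a minimal sketch of those include/linux/ksm.h helpers, assuming mm->ksm_zero_pages is converted to atomic_long_t by the same series (only the names are confirmed by this diff, the bodies are inferred):

/* Sketch -- bodies inferred, not shown in this diff. */
static inline void ksm_map_zero_page(struct mm_struct *mm)
{
	atomic_long_inc(&ksm_zero_pages);
	atomic_long_inc(&mm->ksm_zero_pages);	/* assumed atomic_long_t */
}

static inline long mm_ksm_zero_pages(struct mm_struct *mm)
{
	return atomic_long_read(&mm->ksm_zero_pages);
}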
@@ -890,14 +890,14 @@ static void remove_node_from_stable_tree(struct ksm_stable_node *stable_node)
free_stable_node(stable_node);
}
-enum get_ksm_page_flags {
- GET_KSM_PAGE_NOLOCK,
- GET_KSM_PAGE_LOCK,
- GET_KSM_PAGE_TRYLOCK
+enum ksm_get_folio_flags {
+ KSM_GET_FOLIO_NOLOCK,
+ KSM_GET_FOLIO_LOCK,
+ KSM_GET_FOLIO_TRYLOCK
};
/*
- * get_ksm_page: checks if the page indicated by the stable node
+ * ksm_get_folio: checks if the page indicated by the stable node
* is still its ksm page, despite having held no reference to it.
* In which case we can trust the content of the page, and it
* returns the gotten page; but if the page has now been zapped,
@@ -915,10 +915,10 @@ enum get_ksm_page_flags {
* a page to put something that might look like our key in page->mapping.
* is on its way to being freed; but it is an anomaly to bear in mind.
*/
-static struct page *get_ksm_page(struct ksm_stable_node *stable_node,
- enum get_ksm_page_flags flags)
+static struct folio *ksm_get_folio(struct ksm_stable_node *stable_node,
+ enum ksm_get_folio_flags flags)
{
- struct page *page;
+ struct folio *folio;
void *expected_mapping;
unsigned long kpfn;
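For orientation, the three lock modes map onto the callers visible later in this diff:

/*
 * KSM_GET_FOLIO_NOLOCK:  reference only -- content comparison and
 *                        stale-node pruning (stable_node_dup(), the
 *                        migrate_nodes walk in scan_get_next_rmap_item()).
 * KSM_GET_FOLIO_LOCK:    also take the folio lock -- paths that edit
 *                        rmap state under it (remove_rmap_item_from_tree(),
 *                        remove_stable_node()).
 * KSM_GET_FOLIO_TRYLOCK: opportunistic lock -- stable_tree_search(),
 *                        which must not sleep here and instead propagates
 *                        ERR_PTR(-EBUSY) on contention.
 */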
@@ -926,8 +926,8 @@ static struct page *get_ksm_page(struct ksm_stable_node *stable_node,
PAGE_MAPPING_KSM);
again:
kpfn = READ_ONCE(stable_node->kpfn); /* Address dependency. */
- page = pfn_to_page(kpfn);
- if (READ_ONCE(page->mapping) != expected_mapping)
+ folio = pfn_folio(kpfn);
+ if (READ_ONCE(folio->mapping) != expected_mapping)
goto stale;
/*
@@ -940,41 +940,41 @@ again:
* in folio_migrate_mapping(), it might still be our page,
* in which case it's essential to keep the node.
*/
- while (!get_page_unless_zero(page)) {
+ while (!folio_try_get(folio)) {
/*
* Another check for page->mapping != expected_mapping would
* work here too. We have chosen the !PageSwapCache test to
* optimize the common case, when the page is or is about to
* be freed: PageSwapCache is cleared (under spin_lock_irq)
* in the ref_freeze section of __remove_mapping(); but Anon
- * page->mapping reset to NULL later, in free_pages_prepare().
+ * folio->mapping reset to NULL later, in free_pages_prepare().
*/
- if (!PageSwapCache(page))
+ if (!folio_test_swapcache(folio))
goto stale;
cpu_relax();
}
- if (READ_ONCE(page->mapping) != expected_mapping) {
- put_page(page);
+ if (READ_ONCE(folio->mapping) != expected_mapping) {
+ folio_put(folio);
goto stale;
}
- if (flags == GET_KSM_PAGE_TRYLOCK) {
- if (!trylock_page(page)) {
- put_page(page);
+ if (flags == KSM_GET_FOLIO_TRYLOCK) {
+ if (!folio_trylock(folio)) {
+ folio_put(folio);
return ERR_PTR(-EBUSY);
}
- } else if (flags == GET_KSM_PAGE_LOCK)
- lock_page(page);
+ } else if (flags == KSM_GET_FOLIO_LOCK)
+ folio_lock(folio);
- if (flags != GET_KSM_PAGE_NOLOCK) {
- if (READ_ONCE(page->mapping) != expected_mapping) {
- unlock_page(page);
- put_page(page);
+ if (flags != KSM_GET_FOLIO_NOLOCK) {
+ if (READ_ONCE(folio->mapping) != expected_mapping) {
+ folio_unlock(folio);
+ folio_put(folio);
goto stale;
}
}
- return page;
+ return folio;
stale:
/*
@@ -998,16 +998,16 @@ static void remove_rmap_item_from_tree(struct ksm_rmap_item *rmap_item)
{
if (rmap_item->address & STABLE_FLAG) {
struct ksm_stable_node *stable_node;
- struct page *page;
+ struct folio *folio;
stable_node = rmap_item->head;
- page = get_ksm_page(stable_node, GET_KSM_PAGE_LOCK);
- if (!page)
+ folio = ksm_get_folio(stable_node, KSM_GET_FOLIO_LOCK);
+ if (!folio)
goto out;
hlist_del(&rmap_item->hlist);
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
if (!hlist_empty(&stable_node->hlist))
ksm_pages_sharing--;
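The lookup converted above is a speculative-reference pattern; restating the invariant that makes it safe:

/*
 * 1. stable_node->kpfn is read once (READ_ONCE) and turned into a
 *    folio pointer with no reference held;
 * 2. folio_try_get() -- the folio analogue of get_page_unless_zero()
 *    -- takes a reference only while the refcount is non-zero, i.e.
 *    the folio is not already being freed;
 * 3. folio->mapping is then re-checked against the expected
 *    stable_node encoding, because the folio may have been freed and
 *    reused for something else between steps 1 and 2.
 */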
@@ -1094,11 +1094,11 @@ static inline struct ksm_stable_node *page_stable_node(struct page *page)
return folio_stable_node(page_folio(page));
}
-static inline void set_page_stable_node(struct page *page,
- struct ksm_stable_node *stable_node)
+static inline void folio_set_stable_node(struct folio *folio,
+ struct ksm_stable_node *stable_node)
{
- VM_BUG_ON_PAGE(PageAnon(page) && PageAnonExclusive(page), page);
- page->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM);
+ VM_WARN_ON_FOLIO(folio_test_anon(folio) && PageAnonExclusive(&folio->page), folio);
+ folio->mapping = (void *)((unsigned long)stable_node | PAGE_MAPPING_KSM);
}
#ifdef CONFIG_SYSFS
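Here folio->mapping doubles as the back-pointer into the stable tree. A sketch of the decoding side, using the existing PAGE_MAPPING_* constants from include/linux/page-flags.h (the helper name is hypothetical; mm/ksm.c's real accessor is folio_stable_node()):

static inline struct ksm_stable_node *stable_node_of(struct folio *folio)
{
	unsigned long mapping = (unsigned long)READ_ONCE(folio->mapping);

	/* PAGE_MAPPING_KSM == PAGE_MAPPING_ANON | PAGE_MAPPING_MOVABLE */
	if ((mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_KSM)
		return NULL;		/* not a KSM folio */
	return (struct ksm_stable_node *)(mapping & ~PAGE_MAPPING_FLAGS);
}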
@@ -1107,13 +1107,13 @@ static inline void set_page_stable_node(struct page *page,
*/
static int remove_stable_node(struct ksm_stable_node *stable_node)
{
- struct page *page;
+ struct folio *folio;
int err;
- page = get_ksm_page(stable_node, GET_KSM_PAGE_LOCK);
- if (!page) {
+ folio = ksm_get_folio(stable_node, KSM_GET_FOLIO_LOCK);
+ if (!folio) {
/*
- * get_ksm_page did remove_node_from_stable_tree itself.
+ * ksm_get_folio did remove_node_from_stable_tree itself.
*/
return 0;
}
@@ -1124,22 +1124,22 @@ static int remove_stable_node(struct ksm_stable_node *stable_node)
* merge_across_nodes/max_page_sharing be switched.
*/
err = -EBUSY;
- if (!page_mapped(page)) {
+ if (!folio_mapped(folio)) {
/*
- * The stable node did not yet appear stale to get_ksm_page(),
- * since that allows for an unmapped ksm page to be recognized
+ * The stable node did not yet appear stale to ksm_get_folio(),
+ * since that allows for an unmapped ksm folio to be recognized
* right up until it is freed; but the node is safe to remove.
- * This page might be in an LRU cache waiting to be freed,
- * or it might be PageSwapCache (perhaps under writeback),
+ * This folio might be in an LRU cache waiting to be freed,
+ * or it might be in the swapcache (perhaps under writeback),
* or it might have been removed from swapcache a moment ago.
*/
- set_page_stable_node(page, NULL);
+ folio_set_stable_node(folio, NULL);
remove_node_from_stable_tree(stable_node);
err = 0;
}
- unlock_page(page);
- put_page(page);
+ folio_unlock(folio);
+ folio_put(folio);
return err;
}
@@ -1275,23 +1275,24 @@ static u32 calc_checksum(struct page *page)
return checksum;
}
-static int write_protect_page(struct vm_area_struct *vma, struct page *page,
+static int write_protect_page(struct vm_area_struct *vma, struct folio *folio,
pte_t *orig_pte)
{
struct mm_struct *mm = vma->vm_mm;
- DEFINE_PAGE_VMA_WALK(pvmw, page, vma, 0, 0);
+ DEFINE_FOLIO_VMA_WALK(pvmw, folio, vma, 0, 0);
int swapped;
int err = -EFAULT;
struct mmu_notifier_range range;
bool anon_exclusive;
pte_t entry;
- pvmw.address = page_address_in_vma(page, vma);
+ if (WARN_ON_ONCE(folio_test_large(folio)))
+ return err;
+
+ pvmw.address = page_address_in_vma(&folio->page, vma);
if (pvmw.address == -EFAULT)
goto out;
- BUG_ON(PageTransCompound(page));
-
mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm, pvmw.address,
pvmw.address + PAGE_SIZE);
mmu_notifier_invalidate_range_start(&range);
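On the BUG_ON() -> WARN_ON_ONCE() change above: KSM only merges order-0 folios, since write_protect_page() operates on exactly one PTE. The old code crashed outright if handed a compound page; the folio version warns once and fails the merge with -EFAULT (err's initial value), which the scanning callers already treat as "skip this page".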
@@ -1301,12 +1302,12 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
if (WARN_ONCE(!pvmw.pte, "Unexpected PMD mapping?"))
goto out_unlock;
- anon_exclusive = PageAnonExclusive(page);
+ anon_exclusive = PageAnonExclusive(&folio->page);
entry = ptep_get(pvmw.pte);
if (pte_write(entry) || pte_dirty(entry) ||
anon_exclusive || mm_tlb_flush_pending(mm)) {
- swapped = PageSwapCache(page);
- flush_cache_page(vma, pvmw.address, page_to_pfn(page));
+ swapped = folio_test_swapcache(folio);
+ flush_cache_page(vma, pvmw.address, folio_pfn(folio));
/*
* Ok this is tricky, when get_user_pages_fast() run it doesn't
* take any lock, therefore the check that we are going to make
@@ -1326,26 +1327,26 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
* Check that no O_DIRECT or similar I/O is in progress on the
* page
*/
- if (page_mapcount(page) + 1 + swapped != page_count(page)) {
+ if (folio_mapcount(folio) + 1 + swapped != folio_ref_count(folio)) {
set_pte_at(mm, pvmw.address, pvmw.pte, entry);
goto out_unlock;
}
/* See folio_try_share_anon_rmap_pte(): clear PTE first. */
if (anon_exclusive &&
- folio_try_share_anon_rmap_pte(page_folio(page), page)) {
+ folio_try_share_anon_rmap_pte(folio, &folio->page)) {
set_pte_at(mm, pvmw.address, pvmw.pte, entry);
goto out_unlock;
}
if (pte_dirty(entry))
- set_page_dirty(page);
+ folio_mark_dirty(folio);
entry = pte_mkclean(entry);
if (pte_write(entry))
entry = pte_wrprotect(entry);
- set_pte_at_notify(mm, pvmw.address, pvmw.pte, entry);
+ set_pte_at(mm, pvmw.address, pvmw.pte, entry);
}
*orig_pte = entry;
err = 0;
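The arithmetic behind the folio_mapcount() test in this hunk, for reference:

/*
 * Expected references on a merge candidate:
 *
 *   folio_ref_count(folio) == folio_mapcount(folio)  (one per mapping PTE)
 *                           + 1                      (our caller's reference)
 *                           + swapped                (swap cache, if any)
 *
 * Any surplus reference (O_DIRECT, a GUP pin, ...) means someone may
 * still write to the page after we write-protect the PTE, so the
 * merge must be abandoned.
 */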
@@ -1428,8 +1429,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
* the dirty bit in zero page's PTE is set.
*/
newpte = pte_mkdirty(pte_mkspecial(pfn_pte(page_to_pfn(kpage), vma->vm_page_prot)));
- ksm_zero_pages++;
- mm->ksm_zero_pages++;
+ ksm_map_zero_page(mm);
/*
* We're replacing an anonymous page with a zero page, which is
* not anonymous. We need to do proper accounting otherwise we
@@ -1447,7 +1447,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
* See Documentation/mm/mmu_notifier.rst
*/
ptep_clear_flush(vma, addr, ptep);
- set_pte_at_notify(mm, addr, ptep, newpte);
+ set_pte_at(mm, addr, ptep, newpte);
folio = page_folio(page);
folio_remove_rmap_pte(folio, page, vma);
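The set_pte_at_notify() -> set_pte_at() changes here and in write_protect_page() track the removal of the mmu_notifier .change_pte() callback elsewhere in this cycle (KVM, its only consumer, stopped using it). Plain set_pte_at() suffices because the preceding ptep_clear_flush() already runs inside the invalidate_range_start()/end() section, which tells secondary MMUs to drop their mappings.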
@@ -1505,14 +1505,14 @@ static int try_to_merge_one_page(struct vm_area_struct *vma,
* ptes are necessarily already write-protected. But in either
* case, we need to lock and check page_count is not raised.
*/
- if (write_protect_page(vma, page, &orig_pte) == 0) {
+ if (write_protect_page(vma, page_folio(page), &orig_pte) == 0) {
if (!kpage) {
/*
* While we hold page lock, upgrade page from
* PageAnon+anon_vma to PageKsm+NULL stable_node:
* stable_tree_insert() will update stable_node.
*/
- set_page_stable_node(page, NULL);
+ folio_set_stable_node(page_folio(page), NULL);
mark_page_accessed(page);
/*
* Page reclaim just frees a clean page with no dirty
@@ -1617,14 +1617,14 @@ bool is_page_sharing_candidate(struct ksm_stable_node *stable_node)
return __is_page_sharing_candidate(stable_node, 0);
}
-static struct page *stable_node_dup(struct ksm_stable_node **_stable_node_dup,
- struct ksm_stable_node **_stable_node,
- struct rb_root *root,
- bool prune_stale_stable_nodes)
+static struct folio *stable_node_dup(struct ksm_stable_node **_stable_node_dup,
+ struct ksm_stable_node **_stable_node,
+ struct rb_root *root,
+ bool prune_stale_stable_nodes)
{
struct ksm_stable_node *dup, *found = NULL, *stable_node = *_stable_node;
struct hlist_node *hlist_safe;
- struct page *_tree_page, *tree_page = NULL;
+ struct folio *folio, *tree_folio = NULL;
int nr = 0;
int found_rmap_hlist_len;
@@ -1643,24 +1643,24 @@ static struct page *stable_node_dup(struct ksm_stable_node **_stable_node_dup,
* We must walk all stable_node_dup to prune the stale
* stable nodes during lookup.
*
- * get_ksm_page can drop the nodes from the
+ * ksm_get_folio can drop the nodes from the
* stable_node->hlist if they point to freed pages
* (that's why we do a _safe walk). The "dup"
* stable_node parameter itself will be freed from
* under us if it returns NULL.
*/
- _tree_page = get_ksm_page(dup, GET_KSM_PAGE_NOLOCK);
- if (!_tree_page)
+ folio = ksm_get_folio(dup, KSM_GET_FOLIO_NOLOCK);
+ if (!folio)
continue;
nr += 1;
if (is_page_sharing_candidate(dup)) {
if (!found ||
dup->rmap_hlist_len > found_rmap_hlist_len) {
if (found)
- put_page(tree_page);
+ folio_put(tree_folio);
found = dup;
found_rmap_hlist_len = found->rmap_hlist_len;
- tree_page = _tree_page;
+ tree_folio = folio;
/* skip put_page for found dup */
if (!prune_stale_stable_nodes)
@@ -1668,7 +1668,7 @@ static struct page *stable_node_dup(struct ksm_stable_node **_stable_node_dup,
continue;
}
}
- put_page(_tree_page);
+ folio_put(folio);
}
if (found) {
@@ -1733,7 +1733,7 @@ static struct page *stable_node_dup(struct ksm_stable_node **_stable_node_dup,
}
*_stable_node_dup = found;
- return tree_page;
+ return tree_folio;
}
static struct ksm_stable_node *stable_node_dup_any(struct ksm_stable_node *stable_node,
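Background for the dup/chain walk above: once a stable node hits the ksm_max_page_sharing limit, it is replaced by a chain head whose hlist carries duplicate stable nodes, each backed by its own KSM folio with identical contents. stable_node_dup() walks those duplicates, dropping ones whose folio is gone (hence the _safe walk) and returning the folio of the best candidate that still has room for another mapping.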
@@ -1750,7 +1750,7 @@ static struct ksm_stable_node *stable_node_dup_any(struct ksm_stable_node *stabl
}
/*
- * Like for get_ksm_page, this function can free the *_stable_node and
+ * Like for ksm_get_folio, this function can free the *_stable_node and
* *_stable_node_dup if the returned tree_page is NULL.
*
* It can also free and overwrite *_stable_node with the found
@@ -1763,16 +1763,16 @@ static struct ksm_stable_node *stable_node_dup_any(struct ksm_stable_node *stabl
* function and will be overwritten in all cases, the caller doesn't
* need to initialize it.
*/
-static struct page *__stable_node_chain(struct ksm_stable_node **_stable_node_dup,
- struct ksm_stable_node **_stable_node,
- struct rb_root *root,
- bool prune_stale_stable_nodes)
+static struct folio *__stable_node_chain(struct ksm_stable_node **_stable_node_dup,
+ struct ksm_stable_node **_stable_node,
+ struct rb_root *root,
+ bool prune_stale_stable_nodes)
{
struct ksm_stable_node *stable_node = *_stable_node;
if (!is_stable_node_chain(stable_node)) {
if (is_page_sharing_candidate(stable_node)) {
*_stable_node_dup = stable_node;
- return get_ksm_page(stable_node, GET_KSM_PAGE_NOLOCK);
+ return ksm_get_folio(stable_node, KSM_GET_FOLIO_NOLOCK);
}
/*
* _stable_node_dup set to NULL means the stable_node
@@ -1785,24 +1785,24 @@ static struct page *__stable_node_chain(struct ksm_stable_node **_stable_node_du
prune_stale_stable_nodes);
}
-static __always_inline struct page *chain_prune(struct ksm_stable_node **s_n_d,
- struct ksm_stable_node **s_n,
- struct rb_root *root)
+static __always_inline struct folio *chain_prune(struct ksm_stable_node **s_n_d,
+ struct ksm_stable_node **s_n,
+ struct rb_root *root)
{
return __stable_node_chain(s_n_d, s_n, root, true);
}
-static __always_inline struct page *chain(struct ksm_stable_node **s_n_d,
- struct ksm_stable_node *s_n,
- struct rb_root *root)
+static __always_inline struct folio *chain(struct ksm_stable_node **s_n_d,
+ struct ksm_stable_node *s_n,
+ struct rb_root *root)
{
struct ksm_stable_node *old_stable_node = s_n;
- struct page *tree_page;
+ struct folio *tree_folio;
- tree_page = __stable_node_chain(s_n_d, &s_n, root, false);
+ tree_folio = __stable_node_chain(s_n_d, &s_n, root, false);
/* not pruning dups so s_n cannot have changed */
VM_BUG_ON(s_n != old_stable_node);
- return tree_page;
+ return tree_folio;
}
/*
@@ -1822,28 +1822,30 @@ static struct page *stable_tree_search(struct page *page)
struct rb_node *parent;
struct ksm_stable_node *stable_node, *stable_node_dup, *stable_node_any;
struct ksm_stable_node *page_node;
+ struct folio *folio;
- page_node = page_stable_node(page);
+ folio = page_folio(page);
+ page_node = folio_stable_node(folio);
if (page_node && page_node->head != &migrate_nodes) {
/* ksm page forked */
- get_page(page);
- return page;
+ folio_get(folio);
+ return &folio->page;
}
- nid = get_kpfn_nid(page_to_pfn(page));
+ nid = get_kpfn_nid(folio_pfn(folio));
root = root_stable_tree + nid;
again:
new = &root->rb_node;
parent = NULL;
while (*new) {
- struct page *tree_page;
+ struct folio *tree_folio;
int ret;
cond_resched();
stable_node = rb_entry(*new, struct ksm_stable_node, node);
stable_node_any = NULL;
- tree_page = chain_prune(&stable_node_dup, &stable_node, root);
+ tree_folio = chain_prune(&stable_node_dup, &stable_node, root);
/*
* NOTE: stable_node may have been freed by
* chain_prune() if the returned stable_node_dup is
@@ -1877,14 +1879,14 @@ again:
* write protected at all times. Any will work
* fine to continue the walk.
*/
- tree_page = get_ksm_page(stable_node_any,
- GET_KSM_PAGE_NOLOCK);
+ tree_folio = ksm_get_folio(stable_node_any,
+ KSM_GET_FOLIO_NOLOCK);
}
VM_BUG_ON(!stable_node_dup ^ !!stable_node_any);
- if (!tree_page) {
+ if (!tree_folio) {
/*
* If we walked over a stale stable_node,
- * get_ksm_page() will call rb_erase() and it
+ * ksm_get_folio() will call rb_erase() and it
* may rebalance the tree from under us. So
* restart the search from scratch. Returning
* NULL would be safe too, but we'd generate
@@ -1894,8 +1896,8 @@ again:
goto again;
}
- ret = memcmp_pages(page, tree_page);
- put_page(tree_page);
+ ret = memcmp_pages(page, &tree_folio->page);
+ folio_put(tree_folio);
parent = *new;
if (ret < 0)
@@ -1906,12 +1908,15 @@ again:
if (page_node) {
VM_BUG_ON(page_node->head != &migrate_nodes);
/*
- * Test if the migrated page should be merged
- * into a stable node dup. If the mapcount is
- * 1 we can migrate it with another KSM page
- * without adding it to the chain.
+ * If the mapcount of our migrated KSM folio is
+ * at most 1, we can merge it with another
+ * KSM folio where we know that we have space
+ * for one more mapping without exceeding the
+ * ksm_max_page_sharing limit: see
+ * chain_prune(). This way, we can avoid adding
+ * this stable node to the chain.
*/
- if (page_mapcount(page) > 1)
+ if (folio_mapcount(folio) > 1)
goto chain_append;
}
@@ -1938,26 +1943,26 @@ again:
* It would be more elegant to return stable_node
* than kpage, but that involves more changes.
*/
- tree_page = get_ksm_page(stable_node_dup,
- GET_KSM_PAGE_TRYLOCK);
+ tree_folio = ksm_get_folio(stable_node_dup,
+ KSM_GET_FOLIO_TRYLOCK);
- if (PTR_ERR(tree_page) == -EBUSY)
+ if (PTR_ERR(tree_folio) == -EBUSY)
return ERR_PTR(-EBUSY);
- if (unlikely(!tree_page))
+ if (unlikely(!tree_folio))
/*
* The tree may have been rebalanced,
* so re-evaluate parent and new.
*/
goto again;
- unlock_page(tree_page);
+ folio_unlock(tree_folio);
if (get_kpfn_nid(stable_node_dup->kpfn) !=
NUMA(stable_node_dup->nid)) {
- put_page(tree_page);
+ folio_put(tree_folio);
goto replace;
}
- return tree_page;
+ return &tree_folio->page;
}
}
@@ -1970,8 +1975,8 @@ again:
rb_insert_color(&page_node->node, root);
out:
if (is_page_sharing_candidate(page_node)) {
- get_page(page);
- return page;
+ folio_get(folio);
+ return &folio->page;
} else
return NULL;
@@ -1996,12 +2001,12 @@ replace:
&page_node->node,
root);
if (is_page_sharing_candidate(page_node))
- get_page(page);
+ folio_get(folio);
else
- page = NULL;
+ folio = NULL;
} else {
rb_erase(&stable_node_dup->node, root);
- page = NULL;
+ folio = NULL;
}
} else {
VM_BUG_ON(!is_stable_node_chain(stable_node));
@@ -2012,16 +2017,16 @@ replace:
DO_NUMA(page_node->nid = nid);
stable_node_chain_add_dup(page_node, stable_node);
if (is_page_sharing_candidate(page_node))
- get_page(page);
+ folio_get(folio);
else
- page = NULL;
+ folio = NULL;
} else {
- page = NULL;
+ folio = NULL;
}
}
stable_node_dup->head = &migrate_nodes;
list_add(&stable_node_dup->list, stable_node_dup->head);
- return page;
+ return &folio->page;
chain_append:
/* stable_node_dup could be null if it reached the limit */
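Note on the 'return &folio->page;' just above: folio can be NULL on this path (several branches clear it). The expression still evaluates to NULL only because page is the first member of struct folio (offset 0), mirroring the old 'return page;' -- a subtlety worth keeping in mind when reading the converted code.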
@@ -2064,7 +2069,7 @@ chain_append:
* This function returns the stable tree node just allocated on success,
* NULL otherwise.
*/
-static struct ksm_stable_node *stable_tree_insert(struct page *kpage)
+static struct ksm_stable_node *stable_tree_insert(struct folio *kfolio)
{
int nid;
unsigned long kpfn;
@@ -2074,7 +2079,7 @@ static struct ksm_stable_node *stable_tree_insert(struct page *kpage)
struct ksm_stable_node *stable_node, *stable_node_dup, *stable_node_any;
bool need_chain = false;
- kpfn = page_to_pfn(kpage);
+ kpfn = folio_pfn(kfolio);
nid = get_kpfn_nid(kpfn);
root = root_stable_tree + nid;
again:
@@ -2082,13 +2087,13 @@ again:
new = &root->rb_node;
while (*new) {
- struct page *tree_page;
+ struct folio *tree_folio;
int ret;
cond_resched();
stable_node = rb_entry(*new, struct ksm_stable_node, node);
stable_node_any = NULL;
- tree_page = chain(&stable_node_dup, stable_node, root);
+ tree_folio = chain(&stable_node_dup, stable_node, root);
if (!stable_node_dup) {
/*
* Either all stable_node dups were full in
@@ -2110,14 +2115,14 @@ again:
* write protected at all times. Any will work
* fine to continue the walk.
*/
- tree_page = get_ksm_page(stable_node_any,
- GET_KSM_PAGE_NOLOCK);
+ tree_folio = ksm_get_folio(stable_node_any,
+ KSM_GET_FOLIO_NOLOCK);
}
VM_BUG_ON(!stable_node_dup ^ !!stable_node_any);
- if (!tree_page) {
+ if (!tree_folio) {
/*
* If we walked over a stale stable_node,
- * get_ksm_page() will call rb_erase() and it
+ * ksm_get_folio() will call rb_erase() and it
* may rebalance the tree from under us. So
* restart the search from scratch. Returning
* NULL would be safe too, but we'd generate
@@ -2127,8 +2132,8 @@ again:
goto again;
}
- ret = memcmp_pages(kpage, tree_page);
- put_page(tree_page);
+ ret = memcmp_pages(&kfolio->page, &tree_folio->page);
+ folio_put(tree_folio);
parent = *new;
if (ret < 0)
@@ -2147,7 +2152,6 @@ again:
INIT_HLIST_HEAD(&stable_node_dup->hlist);
stable_node_dup->kpfn = kpfn;
- set_page_stable_node(kpage, stable_node_dup);
stable_node_dup->rmap_hlist_len = 0;
DO_NUMA(stable_node_dup->nid = nid);
if (!need_chain) {
@@ -2166,6 +2170,8 @@ again:
stable_node_chain_add_dup(stable_node_dup, stable_node);
}
+ folio_set_stable_node(kfolio, stable_node_dup);
+
return stable_node_dup;
}
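The insertion path above also moves the ->mapping update: set_page_stable_node() used to run before the node was linked, while folio_set_stable_node() now runs only after the rb-tree or chain linkage is complete -- apparently so that a failed chain allocation (which frees stable_node_dup and returns NULL) can no longer leave kfolio->mapping pointing at a freed node.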
@@ -2425,7 +2431,7 @@ static void cmp_and_merge_page(struct page *page, struct ksm_rmap_item *rmap_ite
* node in the stable tree and add both rmap_items.
*/
lock_page(kpage);
- stable_node = stable_tree_insert(kpage);
+ stable_node = stable_tree_insert(page_folio(kpage));
if (stable_node) {
stable_tree_append(tree_rmap_item, stable_node,
false);
@@ -2597,14 +2603,14 @@ static struct ksm_rmap_item *scan_get_next_rmap_item(struct page **page)
*/
if (!ksm_merge_across_nodes) {
struct ksm_stable_node *stable_node, *next;
- struct page *page;
+ struct folio *folio;
list_for_each_entry_safe(stable_node, next,
&migrate_nodes, list) {
- page = get_ksm_page(stable_node,
- GET_KSM_PAGE_NOLOCK);
- if (page)
- put_page(page);
+ folio = ksm_get_folio(stable_node,
+ KSM_GET_FOLIO_NOLOCK);
+ if (folio)
+ folio_put(folio);
cond_resched();
}
}
@@ -2747,18 +2753,16 @@ static void ksm_do_scan(unsigned int scan_npages)
{
struct ksm_rmap_item *rmap_item;
struct page *page;
- unsigned int npages = scan_npages;
- while (npages-- && likely(!freezing(current))) {
+ while (scan_npages-- && likely(!freezing(current))) {
cond_resched();
rmap_item = scan_get_next_rmap_item(&page);
if (!rmap_item)
return;
cmp_and_merge_page(page, rmap_item);
put_page(page);
+ ksm_pages_scanned++;
}
-
- ksm_pages_scanned += scan_npages - npages;
}
static int ksmd_should_run(void)
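A side effect of the simplification in ksm_do_scan(): ksm_pages_scanned is now bumped once per page actually processed, whereas the old end-of-loop arithmetic was skipped entirely when scan_get_next_rmap_item() returned NULL, undercounting the pages handled before that early return.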
@@ -3172,12 +3176,11 @@ again:
/*
* Collect processes when the error hit an ksm page.
*/
-void collect_procs_ksm(struct page *page, struct list_head *to_kill,
- int force_early)
+void collect_procs_ksm(struct folio *folio, struct page *page,
+ struct list_head *to_kill, int force_early)
{
struct ksm_stable_node *stable_node;
struct ksm_rmap_item *rmap_item;
- struct folio *folio = page_folio(page);
struct vm_area_struct *vma;
struct task_struct *tsk;
@@ -3229,11 +3232,11 @@ void folio_migrate_ksm(struct folio *newfolio, struct folio *folio)
/*
* newfolio->mapping was set in advance; now we need smp_wmb()
* to make sure that the new stable_node->kpfn is visible
- * to get_ksm_page() before it can see that folio->mapping
+ * to ksm_get_folio() before it can see that folio->mapping
* has gone stale (or that folio_test_swapcache has been cleared).
*/
smp_wmb();
- set_page_stable_node(&folio->page, NULL);
+ folio_set_stable_node(folio, NULL);
}
}
#endif /* CONFIG_MIGRATION */
@@ -3256,7 +3259,7 @@ static bool stable_node_dup_remove_range(struct ksm_stable_node *stable_node,
if (stable_node->kpfn >= start_pfn &&
stable_node->kpfn < end_pfn) {
/*
- * Don't get_ksm_page, page has already gone:
+ * Don't ksm_get_folio, page has already gone:
* which is why we keep kpfn instead of page*
*/
remove_node_from_stable_tree(stable_node);
@@ -3344,7 +3347,7 @@ static int ksm_memory_callback(struct notifier_block *self,
* Most of the work is done by page migration; but there might
* be a few stable_nodes left over, still pointing to struct
* pages which have been offlined: prune those from the tree,
- * otherwise get_ksm_page() might later try to access a
+ * otherwise ksm_get_folio() might later try to access a
* non-existent struct page.
*/
ksm_check_stable_tree(mn->start_pfn,
@@ -3370,7 +3373,7 @@ static void wait_while_offlining(void)
#ifdef CONFIG_PROC_FS
long ksm_process_profit(struct mm_struct *mm)
{
- return (long)(mm->ksm_merging_pages + mm->ksm_zero_pages) * PAGE_SIZE -
+ return (long)(mm->ksm_merging_pages + mm_ksm_zero_pages(mm)) * PAGE_SIZE -
mm->ksm_rmap_items * sizeof(struct ksm_rmap_item);
}
#endif /* CONFIG_PROC_FS */
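For reference, the per-mm profit reported via /proc stays:

	profit = (ksm_merging_pages + zero pages placed) * PAGE_SIZE
	       - ksm_rmap_items * sizeof(struct ksm_rmap_item)

only the zero-page term now goes through mm_ksm_zero_pages(), which presumably wraps an atomic_long_read() of the per-mm counter (see the sketch near the top of this diff), so concurrent readers can no longer observe torn updates on 32-bit.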
@@ -3659,7 +3662,7 @@ KSM_ATTR_RO(pages_skipped);
static ssize_t ksm_zero_pages_show(struct kobject *kobj,
struct kobj_attribute *attr, char *buf)
{
- return sysfs_emit(buf, "%ld\n", ksm_zero_pages);
+ return sysfs_emit(buf, "%ld\n", atomic_long_read(&ksm_zero_pages));
}
KSM_ATTR_RO(ksm_zero_pages);
@@ -3668,7 +3671,7 @@ static ssize_t general_profit_show(struct kobject *kobj,
{
long general_profit;
- general_profit = (ksm_pages_sharing + ksm_zero_pages) * PAGE_SIZE -
+ general_profit = (ksm_pages_sharing + atomic_long_read(&ksm_zero_pages)) * PAGE_SIZE -
ksm_rmap_items * sizeof(struct ksm_rmap_item);
return sysfs_emit(buf, "%ld\n", general_profit);