Diffstat (limited to 'fs/proc')
-rw-r--r--  fs/proc/base.c        |   2
-rw-r--r--  fs/proc/fd.c          |  46
-rw-r--r--  fs/proc/inode.c       |  10
-rw-r--r--  fs/proc/meminfo.c     |   3
-rw-r--r--  fs/proc/page.c        |  69
-rw-r--r--  fs/proc/proc_net.c    |   1
-rw-r--r--  fs/proc/proc_sysctl.c |  21
-rw-r--r--  fs/proc/task_mmu.c    | 126
-rw-r--r--  fs/proc/vmcore.c      |   7
9 files changed, 149 insertions(+), 136 deletions(-)
diff --git a/fs/proc/base.c b/fs/proc/base.c
index 18550c071d71..72a1acd03675 100644
--- a/fs/proc/base.c
+++ b/fs/proc/base.c
@@ -3214,7 +3214,7 @@ static int proc_pid_ksm_stat(struct seq_file *m, struct pid_namespace *ns,
mm = get_task_mm(task);
if (mm) {
seq_printf(m, "ksm_rmap_items %lu\n", mm->ksm_rmap_items);
- seq_printf(m, "ksm_zero_pages %lu\n", mm->ksm_zero_pages);
+ seq_printf(m, "ksm_zero_pages %ld\n", mm_ksm_zero_pages(mm));
seq_printf(m, "ksm_merging_pages %lu\n", mm->ksm_merging_pages);
seq_printf(m, "ksm_process_profit %ld\n", ksm_process_profit(mm));
mmput(mm);
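Note the format change from %lu to %ld: the counter is now read through the mm_ksm_zero_pages() helper rather than as a plain field. A minimal userspace sketch of that pattern, assuming the per-mm count is kept as an atomic_long_t behind a small accessor (the struct and helper below are illustrative stand-ins, not the kernel's definitions):

#include <stdatomic.h>
#include <stdio.h>

struct mm_sketch {
	atomic_long ksm_zero_pages;	/* hypothetical stand-in field */
};

/* Hypothetical accessor: returns a plain long, hence the %ld format. */
static long mm_ksm_zero_pages_sketch(struct mm_sketch *mm)
{
	return atomic_load(&mm->ksm_zero_pages);
}

int main(void)
{
	struct mm_sketch mm = { .ksm_zero_pages = 42 };
	printf("ksm_zero_pages %ld\n", mm_ksm_zero_pages_sketch(&mm));
	return 0;
}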
diff --git a/fs/proc/fd.c b/fs/proc/fd.c
index 6e72e5ad42bc..586bbc84ca04 100644
--- a/fs/proc/fd.c
+++ b/fs/proc/fd.c
@@ -39,10 +39,8 @@ static int seq_show(struct seq_file *m, void *v)
spin_lock(&files->file_lock);
file = files_lookup_fd_locked(files, fd);
if (file) {
- struct fdtable *fdt = files_fdtable(files);
-
f_flags = file->f_flags;
- if (close_on_exec(fd, fdt))
+ if (close_on_exec(fd, files))
f_flags |= O_CLOEXEC;
get_file(file);
@@ -74,7 +72,18 @@ out:
return 0;
}
-static int proc_fdinfo_access_allowed(struct inode *inode)
+static int seq_fdinfo_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, seq_show, inode);
+}
+
+/**
+ * Shared /proc/pid/fdinfo and /proc/pid/fdinfo/fd permission helper to ensure
+ * that the current task has PTRACE_MODE_READ in addition to the normal
+ * POSIX-like checks.
+ */
+static int proc_fdinfo_permission(struct mnt_idmap *idmap, struct inode *inode,
+ int mask)
{
bool allowed = false;
struct task_struct *task = get_proc_task(inode);
@@ -88,18 +97,13 @@ static int proc_fdinfo_access_allowed(struct inode *inode)
if (!allowed)
return -EACCES;
- return 0;
+ return generic_permission(idmap, inode, mask);
}
-static int seq_fdinfo_open(struct inode *inode, struct file *file)
-{
- int ret = proc_fdinfo_access_allowed(inode);
-
- if (ret)
- return ret;
-
- return single_open(file, seq_show, inode);
-}
+static const struct inode_operations proc_fdinfo_file_inode_operations = {
+ .permission = proc_fdinfo_permission,
+ .setattr = proc_setattr,
+};
static const struct file_operations proc_fdinfo_file_operations = {
.open = seq_fdinfo_open,
@@ -388,6 +392,8 @@ static struct dentry *proc_fdinfo_instantiate(struct dentry *dentry,
ei = PROC_I(inode);
ei->fd = data->fd;
+ inode->i_op = &proc_fdinfo_file_inode_operations;
+
inode->i_fop = &proc_fdinfo_file_operations;
tid_fd_update_inode(task, inode, 0);
@@ -407,23 +413,13 @@ static int proc_readfdinfo(struct file *file, struct dir_context *ctx)
proc_fdinfo_instantiate);
}
-static int proc_open_fdinfo(struct inode *inode, struct file *file)
-{
- int ret = proc_fdinfo_access_allowed(inode);
-
- if (ret)
- return ret;
-
- return 0;
-}
-
const struct inode_operations proc_fdinfo_inode_operations = {
.lookup = proc_lookupfdinfo,
+ .permission = proc_fdinfo_permission,
.setattr = proc_setattr,
};
const struct file_operations proc_fdinfo_operations = {
- .open = proc_open_fdinfo,
.read = generic_read_dir,
.iterate_shared = proc_readfdinfo,
.llseek = generic_file_llseek,
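The fd.c change moves the ptrace gate out of ->open() and into a ->permission() inode operation, so the check runs on every path-based access and composes with the normal POSIX mode bits via generic_permission(). A compilable sketch of that layering, with all names below being illustrative stand-ins for the kernel helpers:

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>

#define MAY_READ 4	/* mirrors the kernel's MAY_READ mask bit */

/* Stand-in for the ptrace_may_access(task, PTRACE_MODE_READ_FSCREDS) gate. */
static bool ptrace_may_access_sketch(void)
{
	return true;
}

/* Stand-in for generic_permission(): check the owner mode bits only. */
static int generic_permission_sketch(unsigned int mode, int mask)
{
	return (((mode >> 6) & mask) == (unsigned int)mask) ? 0 : -EACCES;
}

/* The fdinfo pattern: ptrace gate first, then the POSIX-like checks. */
static int fdinfo_permission_sketch(unsigned int mode, int mask)
{
	if (!ptrace_may_access_sketch())
		return -EACCES;
	return generic_permission_sketch(mode, mask);
}

int main(void)
{
	printf("%d\n", fdinfo_permission_sketch(0400, MAY_READ));	/* 0 */
	return 0;
}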
diff --git a/fs/proc/inode.c b/fs/proc/inode.c
index dcd513dccf55..d19434e2a58e 100644
--- a/fs/proc/inode.c
+++ b/fs/proc/inode.c
@@ -451,15 +451,13 @@ pde_get_unmapped_area(struct proc_dir_entry *pde, struct file *file, unsigned lo
unsigned long len, unsigned long pgoff,
unsigned long flags)
{
- typeof_member(struct proc_ops, proc_get_unmapped_area) get_area;
+ if (pde->proc_ops->proc_get_unmapped_area)
+ return pde->proc_ops->proc_get_unmapped_area(file, orig_addr, len, pgoff, flags);
- get_area = pde->proc_ops->proc_get_unmapped_area;
#ifdef CONFIG_MMU
- if (!get_area)
- get_area = current->mm->get_unmapped_area;
+ return mm_get_unmapped_area(current->mm, file, orig_addr, len, pgoff, flags);
#endif
- if (get_area)
- return get_area(file, orig_addr, len, pgoff, flags);
+
return orig_addr;
}
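The simplified pde_get_unmapped_area() dispatches in order: the entry's own hook if set, else the mm-wide helper under CONFIG_MMU, else the caller's hint. A compilable sketch of that fallback chain, with illustrative types in place of the kernel's:

#include <stdio.h>

#define SKETCH_MMU 1	/* plays the role of CONFIG_MMU */

typedef unsigned long (*get_area_sketch_t)(unsigned long hint);

struct proc_ops_sketch {
	get_area_sketch_t proc_get_unmapped_area;	/* may be NULL */
};

/* Stand-in for mm_get_unmapped_area(current->mm, ...). */
static unsigned long mm_get_area_sketch(unsigned long hint)
{
	return hint + 0x1000;	/* pretend the mm chose a nearby slot */
}

static unsigned long pde_get_area_sketch(const struct proc_ops_sketch *ops,
					 unsigned long hint)
{
	if (ops->proc_get_unmapped_area)
		return ops->proc_get_unmapped_area(hint);
#ifdef SKETCH_MMU
	return mm_get_area_sketch(hint);
#endif
	return hint;	/* !MMU: just hand the hint back */
}

int main(void)
{
	struct proc_ops_sketch ops = { .proc_get_unmapped_area = NULL };
	printf("%#lx\n", pde_get_area_sketch(&ops, 0x7f0000000000UL));
	return 0;
}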
diff --git a/fs/proc/meminfo.c b/fs/proc/meminfo.c
index 45af9a989d40..245171d9164b 100644
--- a/fs/proc/meminfo.c
+++ b/fs/proc/meminfo.c
@@ -89,8 +89,7 @@ static int meminfo_proc_show(struct seq_file *m, void *v)
show_val_kb(m, "SwapTotal: ", i.totalswap);
show_val_kb(m, "SwapFree: ", i.freeswap);
#ifdef CONFIG_ZSWAP
- seq_printf(m, "Zswap: %8lu kB\n",
- (unsigned long)(zswap_pool_total_size >> 10));
+ show_val_kb(m, "Zswap: ", zswap_total_pages());
seq_printf(m, "Zswapped: %8lu kB\n",
(unsigned long)atomic_read(&zswap_stored_pages) <<
(PAGE_SHIFT - 10));
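zswap_total_pages() reports pages rather than bytes, so the hand-rolled ">> 10" byte conversion gives way to show_val_kb(), which converts the same way as every other meminfo field: kb = pages << (PAGE_SHIFT - 10). A quick arithmetic check, assuming 4 KiB pages:

#include <stdio.h>

#define PAGE_SHIFT 12	/* assumption: 4 KiB pages */

int main(void)
{
	unsigned long pages = 300;	/* 300 pages * 4 KiB = 1200 kB */
	printf("Zswap: %8lu kB\n", pages << (PAGE_SHIFT - 10));
	return 0;
}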
diff --git a/fs/proc/page.c b/fs/proc/page.c
index 9223856c934b..2fb64bdb64eb 100644
--- a/fs/proc/page.c
+++ b/fs/proc/page.c
@@ -107,10 +107,13 @@ static inline u64 kpf_copy_bit(u64 kflags, int ubit, int kbit)
return ((kflags >> kbit) & 1) << ubit;
}
-u64 stable_page_flags(struct page *page)
+u64 stable_page_flags(const struct page *page)
{
- u64 k;
- u64 u;
+ const struct folio *folio;
+ unsigned long k;
+ unsigned long mapping;
+ bool is_anon;
+ u64 u = 0;
/*
* pseudo flag: KPF_NOPAGE
@@ -118,49 +121,47 @@ u64 stable_page_flags(struct page *page)
*/
if (!page)
return 1 << KPF_NOPAGE;
+ folio = page_folio(page);
- k = page->flags;
- u = 0;
+ k = folio->flags;
+ mapping = (unsigned long)folio->mapping;
+ is_anon = mapping & PAGE_MAPPING_ANON;
/*
* pseudo flags for the well known (anonymous) memory mapped pages
*/
if (page_mapped(page))
u |= 1 << KPF_MMAP;
- if (PageAnon(page))
+ if (is_anon) {
u |= 1 << KPF_ANON;
- if (PageKsm(page))
- u |= 1 << KPF_KSM;
+ if (mapping & PAGE_MAPPING_KSM)
+ u |= 1 << KPF_KSM;
+ }
/*
* compound pages: export both head/tail info
* they together define a compound page's start/end pos and order
*/
- if (PageHead(page))
- u |= 1 << KPF_COMPOUND_HEAD;
- if (PageTail(page))
+ if (page == &folio->page)
+ u |= kpf_copy_bit(k, KPF_COMPOUND_HEAD, PG_head);
+ else
u |= 1 << KPF_COMPOUND_TAIL;
- if (PageHuge(page))
+ if (folio_test_hugetlb(folio))
u |= 1 << KPF_HUGE;
/*
- * PageTransCompound can be true for non-huge compound pages (slab
- * pages or pages allocated by drivers with __GFP_COMP) because it
- * just checks PG_head/PG_tail, so we need to check PageLRU/PageAnon
+ * We need to check PageLRU/PageAnon
* to make sure a given page is a thp, not a non-huge compound page.
*/
- else if (PageTransCompound(page)) {
- struct page *head = compound_head(page);
-
- if (PageLRU(head) || PageAnon(head))
+ else if (folio_test_large(folio)) {
+ if ((k & (1 << PG_lru)) || is_anon)
u |= 1 << KPF_THP;
- else if (is_huge_zero_page(head)) {
+ else if (is_huge_zero_folio(folio)) {
u |= 1 << KPF_ZERO_PAGE;
u |= 1 << KPF_THP;
}
} else if (is_zero_pfn(page_to_pfn(page)))
u |= 1 << KPF_ZERO_PAGE;
-
/*
* Caveats on high order pages: PG_buddy and PG_slab will only be set
* on the head page.
@@ -174,16 +175,17 @@ u64 stable_page_flags(struct page *page)
u |= 1 << KPF_OFFLINE;
if (PageTable(page))
u |= 1 << KPF_PGTABLE;
+ if (folio_test_slab(folio))
+ u |= 1 << KPF_SLAB;
- if (page_is_idle(page))
+#if defined(CONFIG_PAGE_IDLE_FLAG) && defined(CONFIG_64BIT)
+ u |= kpf_copy_bit(k, KPF_IDLE, PG_idle);
+#else
+ if (folio_test_idle(folio))
u |= 1 << KPF_IDLE;
+#endif
u |= kpf_copy_bit(k, KPF_LOCKED, PG_locked);
-
- u |= kpf_copy_bit(k, KPF_SLAB, PG_slab);
- if (PageTail(page) && PageSlab(page))
- u |= 1 << KPF_SLAB;
-
u |= kpf_copy_bit(k, KPF_ERROR, PG_error);
u |= kpf_copy_bit(k, KPF_DIRTY, PG_dirty);
u |= kpf_copy_bit(k, KPF_UPTODATE, PG_uptodate);
@@ -194,7 +196,8 @@ u64 stable_page_flags(struct page *page)
u |= kpf_copy_bit(k, KPF_ACTIVE, PG_active);
u |= kpf_copy_bit(k, KPF_RECLAIM, PG_reclaim);
- if (PageSwapCache(page))
+#define SWAPCACHE ((1 << PG_swapbacked) | (1 << PG_swapcache))
+ if ((k & SWAPCACHE) == SWAPCACHE)
u |= 1 << KPF_SWAPCACHE;
u |= kpf_copy_bit(k, KPF_SWAPBACKED, PG_swapbacked);
@@ -202,7 +205,10 @@ u64 stable_page_flags(struct page *page)
u |= kpf_copy_bit(k, KPF_MLOCKED, PG_mlocked);
#ifdef CONFIG_MEMORY_FAILURE
- u |= kpf_copy_bit(k, KPF_HWPOISON, PG_hwpoison);
+ if (u & (1 << KPF_HUGE))
+ u |= kpf_copy_bit(k, KPF_HWPOISON, PG_hwpoison);
+ else
+ u |= kpf_copy_bit(page->flags, KPF_HWPOISON, PG_hwpoison);
#endif
#ifdef CONFIG_ARCH_USES_PG_UNCACHED
@@ -228,7 +234,6 @@ static ssize_t kpageflags_read(struct file *file, char __user *buf,
{
const unsigned long max_dump_pfn = get_max_dump_pfn();
u64 __user *out = (u64 __user *)buf;
- struct page *ppage;
unsigned long src = *ppos;
unsigned long pfn;
ssize_t ret = 0;
@@ -245,9 +250,9 @@ static ssize_t kpageflags_read(struct file *file, char __user *buf,
* TODO: ZONE_DEVICE support requires to identify
* memmaps that were actually initialized.
*/
- ppage = pfn_to_online_page(pfn);
+ struct page *page = pfn_to_online_page(pfn);
- if (put_user(stable_page_flags(ppage), out)) {
+ if (put_user(stable_page_flags(page), out)) {
ret = -EFAULT;
break;
}
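Worth calling out in the stable_page_flags() rewrite above: KPF_SWAPCACHE is now derived from the raw flags word, and it deliberately requires both PG_swapbacked and PG_swapcache, matching what folio_test_swapcache() checks. A standalone demo of the two-bit test (the bit positions are made up for illustration, not the real page-flags layout):

#include <stdio.h>

enum { PG_swapbacked = 3, PG_swapcache = 4 };	/* illustrative bits */
#define SWAPCACHE ((1UL << PG_swapbacked) | (1UL << PG_swapcache))

int main(void)
{
	unsigned long k = SWAPCACHE;	/* both bits set: in swap cache */
	printf("%d\n", (k & SWAPCACHE) == SWAPCACHE);	/* 1 */

	k &= ~(1UL << PG_swapbacked);	/* PG_swapcache alone is not enough */
	printf("%d\n", (k & SWAPCACHE) == SWAPCACHE);	/* 0 */
	return 0;
}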
diff --git a/fs/proc/proc_net.c b/fs/proc/proc_net.c
index 2ba31b6d68c0..52f0b75cbce2 100644
--- a/fs/proc/proc_net.c
+++ b/fs/proc/proc_net.c
@@ -135,6 +135,7 @@ EXPORT_SYMBOL_GPL(proc_create_net_data);
* @parent: The parent directory in which to create.
* @ops: The seq_file ops with which to read the file.
* @write: The write method with which to 'modify' the file.
+ * @state_size: The size of the per-file private state to allocate.
* @data: Data for retrieval by pde_data().
*
* Create a network namespaced proc file in the @parent directory with the
diff --git a/fs/proc/proc_sysctl.c b/fs/proc/proc_sysctl.c
index 37cde0efee57..b1c2c0b82116 100644
--- a/fs/proc/proc_sysctl.c
+++ b/fs/proc/proc_sysctl.c
@@ -30,9 +30,7 @@ static const struct file_operations proc_sys_dir_file_operations;
static const struct inode_operations proc_sys_dir_operations;
/* Support for permanently empty directories */
-static struct ctl_table sysctl_mount_point[] = {
- {.type = SYSCTL_TABLE_TYPE_PERMANENTLY_EMPTY }
-};
+static struct ctl_table sysctl_mount_point[] = { };
/**
* register_sysctl_mount_point() - registers a sysctl mount point
@@ -48,14 +46,12 @@ struct ctl_table_header *register_sysctl_mount_point(const char *path)
}
EXPORT_SYMBOL(register_sysctl_mount_point);
-#define sysctl_is_perm_empty_ctl_table(tptr) \
- (tptr[0].type == SYSCTL_TABLE_TYPE_PERMANENTLY_EMPTY)
#define sysctl_is_perm_empty_ctl_header(hptr) \
- (sysctl_is_perm_empty_ctl_table(hptr->ctl_table))
+ (hptr->type == SYSCTL_TABLE_TYPE_PERMANENTLY_EMPTY)
#define sysctl_set_perm_empty_ctl_header(hptr) \
- (hptr->ctl_table[0].type = SYSCTL_TABLE_TYPE_PERMANENTLY_EMPTY)
+ (hptr->type = SYSCTL_TABLE_TYPE_PERMANENTLY_EMPTY)
#define sysctl_clear_perm_empty_ctl_header(hptr) \
- (hptr->ctl_table[0].type = SYSCTL_TABLE_TYPE_DEFAULT)
+ (hptr->type = SYSCTL_TABLE_TYPE_DEFAULT)
void proc_sys_poll_notify(struct ctl_table_poll *poll)
{
@@ -210,6 +206,8 @@ static void init_header(struct ctl_table_header *head,
node++;
}
}
+ if (table == sysctl_mount_point)
+ sysctl_set_perm_empty_ctl_header(head);
}
static void erase_header(struct ctl_table_header *head)
@@ -232,8 +230,7 @@ static int insert_header(struct ctl_dir *dir, struct ctl_table_header *header)
return -EROFS;
/* Am I creating a permanently empty directory? */
- if (header->ctl_table_size > 0 &&
- sysctl_is_perm_empty_ctl_table(header->ctl_table)) {
+ if (sysctl_is_perm_empty_ctl_header(header)) {
if (!RB_EMPTY_ROOT(&dir->root))
return -EINVAL;
sysctl_set_perm_empty_ctl_header(dir_h);
@@ -480,7 +477,7 @@ static struct inode *proc_sys_make_inode(struct super_block *sb,
}
if (root->set_ownership)
- root->set_ownership(head, table, &inode->i_uid, &inode->i_gid);
+ root->set_ownership(head, &inode->i_uid, &inode->i_gid);
else {
inode->i_uid = GLOBAL_ROOT_UID;
inode->i_gid = GLOBAL_ROOT_GID;
@@ -1204,7 +1201,7 @@ static bool get_links(struct ctl_dir *dir,
struct ctl_table *entry, *link;
if (header->ctl_table_size == 0 ||
- sysctl_is_perm_empty_ctl_table(header->ctl_table))
+ sysctl_is_perm_empty_ctl_header(header))
return true;
/* Are there links available for every entry in table? */
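The net effect of the sysctl hunks: the "permanently empty" marker lives on the ctl_table_header itself instead of in a sentinel ctl_table[0] slot, so the checks no longer dereference a table that may have zero entries. A small sketch of the resulting header-only check, using simplified stand-in types:

#include <stdio.h>

enum ctl_type_sketch { TYPE_DEFAULT, TYPE_PERMANENTLY_EMPTY };

struct ctl_table_sketch { const char *procname; };

struct ctl_header_sketch {
	enum ctl_type_sketch type;		/* flag now carried here */
	const struct ctl_table_sketch *table;	/* may have zero entries */
	int table_size;
};

/* After the change: header-only check, safe even when table_size == 0. */
static int is_perm_empty_sketch(const struct ctl_header_sketch *h)
{
	return h->type == TYPE_PERMANENTLY_EMPTY;
}

int main(void)
{
	struct ctl_header_sketch mount_point = {
		.type = TYPE_PERMANENTLY_EMPTY,
		.table = NULL,		/* no sentinel entry needed */
		.table_size = 0,
	};
	printf("%d\n", is_perm_empty_sketch(&mount_point));	/* 1 */
	return 0;
}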
diff --git a/fs/proc/task_mmu.c b/fs/proc/task_mmu.c
index 23fbab954c20..f8d35f993fe5 100644
--- a/fs/proc/task_mmu.c
+++ b/fs/proc/task_mmu.c
@@ -411,14 +411,14 @@ struct mem_size_stats {
};
static void smaps_page_accumulate(struct mem_size_stats *mss,
- struct page *page, unsigned long size, unsigned long pss,
+ struct folio *folio, unsigned long size, unsigned long pss,
bool dirty, bool locked, bool private)
{
mss->pss += pss;
- if (PageAnon(page))
+ if (folio_test_anon(folio))
mss->pss_anon += pss;
- else if (PageSwapBacked(page))
+ else if (folio_test_swapbacked(folio))
mss->pss_shmem += pss;
else
mss->pss_file += pss;
@@ -426,7 +426,7 @@ static void smaps_page_accumulate(struct mem_size_stats *mss,
if (locked)
mss->pss_locked += pss;
- if (dirty || PageDirty(page)) {
+ if (dirty || folio_test_dirty(folio)) {
mss->pss_dirty += pss;
if (private)
mss->private_dirty += size;
@@ -444,6 +444,7 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page,
bool compound, bool young, bool dirty, bool locked,
bool migration)
{
+ struct folio *folio = page_folio(page);
int i, nr = compound ? compound_nr(page) : 1;
unsigned long size = nr * PAGE_SIZE;
@@ -451,27 +452,28 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page,
* First accumulate quantities that depend only on |size| and the type
* of the compound page.
*/
- if (PageAnon(page)) {
+ if (folio_test_anon(folio)) {
mss->anonymous += size;
- if (!PageSwapBacked(page) && !dirty && !PageDirty(page))
+ if (!folio_test_swapbacked(folio) && !dirty &&
+ !folio_test_dirty(folio))
mss->lazyfree += size;
}
- if (PageKsm(page))
+ if (folio_test_ksm(folio))
mss->ksm += size;
mss->resident += size;
/* Accumulate the size in pages that have been accessed. */
- if (young || page_is_young(page) || PageReferenced(page))
+ if (young || folio_test_young(folio) || folio_test_referenced(folio))
mss->referenced += size;
/*
* Then accumulate quantities that may depend on sharing, or that may
* differ page-by-page.
*
- * page_count(page) == 1 guarantees the page is mapped exactly once.
+ * refcount == 1 guarantees the page is mapped exactly once.
* If any subpage of the compound page mapped with PTE it would elevate
- * page_count().
+ * the refcount.
*
* The page_mapcount() is called to get a snapshot of the mapcount.
* Without holding the page lock this snapshot can be slightly wrong as
@@ -480,9 +482,9 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page,
* especially for migration entries. Treat regular migration entries
* as mapcount == 1.
*/
- if ((page_count(page) == 1) || migration) {
- smaps_page_accumulate(mss, page, size, size << PSS_SHIFT, dirty,
- locked, true);
+ if ((folio_ref_count(folio) == 1) || migration) {
+ smaps_page_accumulate(mss, folio, size, size << PSS_SHIFT,
+ dirty, locked, true);
return;
}
for (i = 0; i < nr; i++, page++) {
@@ -490,8 +492,8 @@ static void smaps_account(struct mem_size_stats *mss, struct page *page,
unsigned long pss = PAGE_SIZE << PSS_SHIFT;
if (mapcount >= 2)
pss /= mapcount;
- smaps_page_accumulate(mss, page, PAGE_SIZE, pss, dirty, locked,
- mapcount < 2);
+ smaps_page_accumulate(mss, folio, PAGE_SIZE, pss,
+ dirty, locked, mapcount < 2);
}
}
@@ -576,6 +578,7 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
struct vm_area_struct *vma = walk->vma;
bool locked = !!(vma->vm_flags & VM_LOCKED);
struct page *page = NULL;
+ struct folio *folio;
bool migration = false;
if (pmd_present(*pmd)) {
@@ -590,11 +593,12 @@ static void smaps_pmd_entry(pmd_t *pmd, unsigned long addr,
}
if (IS_ERR_OR_NULL(page))
return;
- if (PageAnon(page))
+ folio = page_folio(page);
+ if (folio_test_anon(folio))
mss->anonymous_thp += HPAGE_PMD_SIZE;
- else if (PageSwapBacked(page))
+ else if (folio_test_swapbacked(folio))
mss->shmem_thp += HPAGE_PMD_SIZE;
- else if (is_zone_device_page(page))
+ else if (folio_is_zone_device(folio))
/* pass */;
else
mss->file_thp += HPAGE_PMD_SIZE;
@@ -726,19 +730,20 @@ static int smaps_hugetlb_range(pte_t *pte, unsigned long hmask,
{
struct mem_size_stats *mss = walk->private;
struct vm_area_struct *vma = walk->vma;
- struct page *page = NULL;
- pte_t ptent = ptep_get(pte);
+ pte_t ptent = huge_ptep_get(pte);
+ struct folio *folio = NULL;
if (pte_present(ptent)) {
- page = vm_normal_page(vma, addr, ptent);
+ folio = page_folio(pte_page(ptent));
} else if (is_swap_pte(ptent)) {
swp_entry_t swpent = pte_to_swp_entry(ptent);
if (is_pfn_swap_entry(swpent))
- page = pfn_swap_entry_to_page(swpent);
+ folio = pfn_swap_entry_folio(swpent);
}
- if (page) {
- if (page_mapcount(page) >= 2 || hugetlb_pmd_shared(pte))
+ if (folio) {
+ if (folio_likely_mapped_shared(folio) ||
+ hugetlb_pmd_shared(pte))
mss->shared_hugetlb += huge_page_size(hstate_vma(vma));
else
mss->private_hugetlb += huge_page_size(hstate_vma(vma));
@@ -866,8 +871,8 @@ static int show_smap(struct seq_file *m, void *v)
__show_smap(m, &mss, false);
seq_printf(m, "THPeligible: %8u\n",
- !!thp_vma_allowable_orders(vma, vma->vm_flags, true, false,
- true, THP_ORDERS_ALL));
+ !!thp_vma_allowable_orders(vma, vma->vm_flags,
+ TVA_SMAPS | TVA_ENFORCE_SYSFS, THP_ORDERS_ALL));
if (arch_pkeys_enabled())
seq_printf(m, "ProtectionKey: %8u\n", vma_pkey(vma));
@@ -965,12 +970,17 @@ static int show_smaps_rollup(struct seq_file *m, void *v)
break;
/* Case 1 and 2 above */
- if (vma->vm_start >= last_vma_end)
+ if (vma->vm_start >= last_vma_end) {
+ smap_gather_stats(vma, &mss, 0);
+ last_vma_end = vma->vm_end;
continue;
+ }
/* Case 4 above */
- if (vma->vm_end > last_vma_end)
+ if (vma->vm_end > last_vma_end) {
smap_gather_stats(vma, &mss, last_vma_end);
+ last_vma_end = vma->vm_end;
+ }
}
} for_each_vma(vmi, vma);
@@ -1161,7 +1171,7 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
struct vm_area_struct *vma = walk->vma;
pte_t *pte, ptent;
spinlock_t *ptl;
- struct page *page;
+ struct folio *folio;
ptl = pmd_trans_huge_lock(pmd, vma);
if (ptl) {
@@ -1173,12 +1183,12 @@ static int clear_refs_pte_range(pmd_t *pmd, unsigned long addr,
if (!pmd_present(*pmd))
goto out;
- page = pmd_page(*pmd);
+ folio = pmd_folio(*pmd);
/* Clear accessed and referenced bits. */
pmdp_test_and_clear_young(vma, addr, pmd);
- test_and_clear_page_young(page);
- ClearPageReferenced(page);
+ folio_test_clear_young(folio);
+ folio_clear_referenced(folio);
out:
spin_unlock(ptl);
return 0;
@@ -1200,14 +1210,14 @@ out:
if (!pte_present(ptent))
continue;
- page = vm_normal_page(vma, addr, ptent);
- if (!page)
+ folio = vm_normal_folio(vma, addr, ptent);
+ if (!folio)
continue;
/* Clear accessed and referenced bits. */
ptep_test_and_clear_young(vma, addr, pte);
- test_and_clear_page_young(page);
- ClearPageReferenced(page);
+ folio_test_clear_young(folio);
+ folio_clear_referenced(folio);
}
pte_unmap_unlock(pte - 1, ptl);
cond_resched();
@@ -1574,12 +1584,13 @@ static int pagemap_hugetlb_range(pte_t *ptep, unsigned long hmask,
pte = huge_ptep_get(ptep);
if (pte_present(pte)) {
- struct page *page = pte_page(pte);
+ struct folio *folio = page_folio(pte_page(pte));
- if (!PageAnon(page))
+ if (!folio_test_anon(folio))
flags |= PM_FILE;
- if (page_mapcount(page) == 1)
+ if (!folio_likely_mapped_shared(folio) &&
+ !hugetlb_pmd_shared(ptep))
flags |= PM_MMAP_EXCLUSIVE;
if (huge_pte_uffd_wp(pte))
@@ -1817,15 +1828,13 @@ static unsigned long pagemap_page_category(struct pagemap_scan_private *p,
}
static void make_uffd_wp_pte(struct vm_area_struct *vma,
- unsigned long addr, pte_t *pte)
+ unsigned long addr, pte_t *pte, pte_t ptent)
{
- pte_t ptent = ptep_get(pte);
-
if (pte_present(ptent)) {
pte_t old_pte;
old_pte = ptep_modify_prot_start(vma, addr, pte);
- ptent = pte_mkuffd_wp(ptent);
+ ptent = pte_mkuffd_wp(old_pte);
ptep_modify_prot_commit(vma, addr, pte, old_pte, ptent);
} else if (is_swap_pte(ptent)) {
ptent = pte_swp_mkuffd_wp(ptent);
@@ -2175,9 +2184,12 @@ static int pagemap_scan_pmd_entry(pmd_t *pmd, unsigned long start,
if ((p->arg.flags & PM_SCAN_WP_MATCHING) && !p->vec_out) {
/* Fast path for performing exclusive WP */
for (addr = start; addr != end; pte++, addr += PAGE_SIZE) {
- if (pte_uffd_wp(ptep_get(pte)))
+ pte_t ptent = ptep_get(pte);
+
+ if ((pte_present(ptent) && pte_uffd_wp(ptent)) ||
+ pte_swp_uffd_wp_any(ptent))
continue;
- make_uffd_wp_pte(vma, addr, pte);
+ make_uffd_wp_pte(vma, addr, pte, ptent);
if (!flush_end)
start = addr;
flush_end = addr + PAGE_SIZE;
@@ -2190,8 +2202,10 @@ static int pagemap_scan_pmd_entry(pmd_t *pmd, unsigned long start,
p->arg.return_mask == PAGE_IS_WRITTEN) {
for (addr = start; addr < end; pte++, addr += PAGE_SIZE) {
unsigned long next = addr + PAGE_SIZE;
+ pte_t ptent = ptep_get(pte);
- if (pte_uffd_wp(ptep_get(pte)))
+ if ((pte_present(ptent) && pte_uffd_wp(ptent)) ||
+ pte_swp_uffd_wp_any(ptent))
continue;
ret = pagemap_scan_output(p->cur_vma_category | PAGE_IS_WRITTEN,
p, addr, &next);
@@ -2199,7 +2213,7 @@ static int pagemap_scan_pmd_entry(pmd_t *pmd, unsigned long start,
break;
if (~p->arg.flags & PM_SCAN_WP_MATCHING)
continue;
- make_uffd_wp_pte(vma, addr, pte);
+ make_uffd_wp_pte(vma, addr, pte, ptent);
if (!flush_end)
start = addr;
flush_end = next;
@@ -2208,8 +2222,9 @@ static int pagemap_scan_pmd_entry(pmd_t *pmd, unsigned long start,
}
for (addr = start; addr != end; pte++, addr += PAGE_SIZE) {
+ pte_t ptent = ptep_get(pte);
unsigned long categories = p->cur_vma_category |
- pagemap_page_category(p, vma, addr, ptep_get(pte));
+ pagemap_page_category(p, vma, addr, ptent);
unsigned long next = addr + PAGE_SIZE;
if (!pagemap_scan_is_interesting_page(categories, p))
@@ -2224,7 +2239,7 @@ static int pagemap_scan_pmd_entry(pmd_t *pmd, unsigned long start,
if (~categories & PAGE_IS_WRITTEN)
continue;
- make_uffd_wp_pte(vma, addr, pte);
+ make_uffd_wp_pte(vma, addr, pte, ptent);
if (!flush_end)
start = addr;
flush_end = next;
@@ -2547,28 +2562,29 @@ struct numa_maps_private {
static void gather_stats(struct page *page, struct numa_maps *md, int pte_dirty,
unsigned long nr_pages)
{
+ struct folio *folio = page_folio(page);
int count = page_mapcount(page);
md->pages += nr_pages;
- if (pte_dirty || PageDirty(page))
+ if (pte_dirty || folio_test_dirty(folio))
md->dirty += nr_pages;
- if (PageSwapCache(page))
+ if (folio_test_swapcache(folio))
md->swapcache += nr_pages;
- if (PageActive(page) || PageUnevictable(page))
+ if (folio_test_active(folio) || folio_test_unevictable(folio))
md->active += nr_pages;
- if (PageWriteback(page))
+ if (folio_test_writeback(folio))
md->writeback += nr_pages;
- if (PageAnon(page))
+ if (folio_test_anon(folio))
md->anon += nr_pages;
if (count > md->mapcount_max)
md->mapcount_max = count;
- md->node[page_to_nid(page)] += nr_pages;
+ md->node[folio_nid(folio)] += nr_pages;
}
static struct page *can_gather_numa_stats(pte_t pte, struct vm_area_struct *vma,
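The smaps_account()/smaps_page_accumulate() conversion above swaps the page-flag tests for folio APIs but keeps the PSS arithmetic intact: each page's PAGE_SIZE is held in fixed point (shifted up by PSS_SHIFT) and divided by its mapcount. A worked example of that bookkeeping, using the PSS_SHIFT value from this file and assuming 4 KiB pages:

#include <stdio.h>

#define PSS_SHIFT 12		/* as in fs/proc/task_mmu.c */
#define PAGE_SIZE 4096UL	/* assumption: 4 KiB pages */

int main(void)
{
	unsigned long pss = PAGE_SIZE << PSS_SHIFT;	/* one page, fixed point */
	int mapcount = 3;	/* page mapped into three address spaces */

	if (mapcount >= 2)
		pss /= mapcount;

	/* shifted back down when reported: 4096 / 3 ~= 1365 bytes */
	printf("pss share: %lu bytes\n", pss >> PSS_SHIFT);
	return 0;
}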
diff --git a/fs/proc/vmcore.c b/fs/proc/vmcore.c
index 1fb213f379a5..b52d85f8ad59 100644
--- a/fs/proc/vmcore.c
+++ b/fs/proc/vmcore.c
@@ -383,6 +383,8 @@ static ssize_t __read_vmcore(struct iov_iter *iter, loff_t *fpos)
/* leave now if filled buffer already */
if (!iov_iter_count(iter))
return acc;
+
+ cond_resched();
}
list_for_each_entry(m, &vmcore_list, list) {
@@ -1370,9 +1372,8 @@ static void vmcoredd_write_header(void *buf, struct vmcoredd_data *data,
vdd_hdr->n_descsz = size + sizeof(vdd_hdr->dump_name);
vdd_hdr->n_type = NT_VMCOREDD;
- strncpy((char *)vdd_hdr->name, VMCOREDD_NOTE_NAME,
- sizeof(vdd_hdr->name));
- memcpy(vdd_hdr->dump_name, data->dump_name, sizeof(vdd_hdr->dump_name));
+ strscpy_pad(vdd_hdr->name, VMCOREDD_NOTE_NAME);
+ strscpy_pad(vdd_hdr->dump_name, data->dump_name);
}
/**
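strscpy_pad() both NUL-terminates and zero-fills the remainder of the destination, neither of which strncpy() guarantees; for note headers copied verbatim into the dump image, that keeps stale buffer contents from leaking out. A userspace approximation of its semantics (ignoring the kernel function's truncation return value):

#include <stdio.h>
#include <string.h>

/* Approximation of strscpy_pad(): copy, NUL-terminate, zero the tail. */
static void strscpy_pad_sketch(char *dst, const char *src, size_t size)
{
	size_t len = strnlen(src, size - 1);	/* leave room for the NUL */

	memcpy(dst, src, len);
	memset(dst + len, 0, size - len);	/* NUL plus zero padding */
}

int main(void)
{
	char name[8];

	strscpy_pad_sketch(name, "LINUX", sizeof(name));
	for (size_t i = 0; i < sizeof(name); i++)
		printf("%02x ", (unsigned char)name[i]);
	printf("\n");	/* 4c 49 4e 55 58 00 00 00 */
	return 0;
}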