author | Mike Rapoport <rppt@linux.vnet.ibm.com> | 2018-03-21 21:22:47 +0200
committer | Jonathan Corbet <corbet@lwn.net> | 2018-04-16 14:18:15 -0600
commit | ad56b738c5dd223a2f66685830f82194025a6138 (patch)
tree | 3994f40f1f93aec279d0b5c9117c0085a9f9ab03 /mm
parent | 3406bb5c64a091ad887c3fb339ad88e9e88ef938 (diff)
docs/vm: rename documentation files to .rst
Signed-off-by: Mike Rapoport <rppt@linux.vnet.ibm.com>
Signed-off-by: Jonathan Corbet <corbet@lwn.net>
Diffstat (limited to 'mm')
-rw-r--r-- | mm/Kconfig       | 6
-rw-r--r-- | mm/cleancache.c  | 2
-rw-r--r-- | mm/frontswap.c   | 2
-rw-r--r-- | mm/hmm.c         | 2
-rw-r--r-- | mm/huge_memory.c | 4
-rw-r--r-- | mm/hugetlb.c     | 4
-rw-r--r-- | mm/ksm.c         | 4
-rw-r--r-- | mm/mmap.c        | 2
-rw-r--r-- | mm/rmap.c        | 6
-rw-r--r-- | mm/util.c        | 2
10 files changed, 17 insertions, 17 deletions
diff --git a/mm/Kconfig b/mm/Kconfig
index c782e8fb7235..b9f04213a353 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -312,7 +312,7 @@ config KSM
 	  the many instances by a single page with that content, so
 	  saving memory until one or another app needs to modify the content.
 	  Recommended for use with KVM, or with other duplicative applications.
-	  See Documentation/vm/ksm.txt for more information: KSM is inactive
+	  See Documentation/vm/ksm.rst for more information: KSM is inactive
 	  until a program has madvised that an area is MADV_MERGEABLE, and
 	  root has set /sys/kernel/mm/ksm/run to 1 (if CONFIG_SYSFS is set).
 
@@ -537,7 +537,7 @@ config MEM_SOFT_DIRTY
 	  into a page just as regular dirty bit, but unlike the latter
 	  it can be cleared by hands.
 
-	  See Documentation/vm/soft-dirty.txt for more details.
+	  See Documentation/vm/soft-dirty.rst for more details.
 
 config ZSWAP
 	bool "Compressed cache for swap pages (EXPERIMENTAL)"
@@ -664,7 +664,7 @@ config IDLE_PAGE_TRACKING
 	  be useful to tune memory cgroup limits and/or for job placement
 	  within a compute cluster.
 
-	  See Documentation/vm/idle_page_tracking.txt for more details.
+	  See Documentation/vm/idle_page_tracking.rst for more details.
 
 # arch_add_memory() comprehends device memory
 config ARCH_HAS_ZONE_DEVICE
diff --git a/mm/cleancache.c b/mm/cleancache.c
index f7b9fdc79d97..126548b5a292 100644
--- a/mm/cleancache.c
+++ b/mm/cleancache.c
@@ -3,7 +3,7 @@
  *
  * This code provides the generic "frontend" layer to call a matching
  * "backend" driver implementation of cleancache.  See
- * Documentation/vm/cleancache.txt for more information.
+ * Documentation/vm/cleancache.rst for more information.
  *
  * Copyright (C) 2009-2010 Oracle Corp. All rights reserved.
  * Author: Dan Magenheimer
diff --git a/mm/frontswap.c b/mm/frontswap.c
index fec8b5044040..4f5476a0f955 100644
--- a/mm/frontswap.c
+++ b/mm/frontswap.c
@@ -3,7 +3,7 @@
  *
  * This code provides the generic "frontend" layer to call a matching
  * "backend" driver implementation of frontswap.  See
- * Documentation/vm/frontswap.txt for more information.
+ * Documentation/vm/frontswap.rst for more information.
  *
  * Copyright (C) 2009-2012 Oracle Corp. All rights reserved.
  * Author: Dan Magenheimer
diff --git a/mm/hmm.c b/mm/hmm.c
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -37,7 +37,7 @@
 
 #if defined(CONFIG_DEVICE_PRIVATE) || defined(CONFIG_DEVICE_PUBLIC)
 /*
- * Device private memory see HMM (Documentation/vm/hmm.txt) or hmm.h
+ * Device private memory see HMM (Documentation/vm/hmm.rst) or hmm.h
  */
 DEFINE_STATIC_KEY_FALSE(device_private_key);
 EXPORT_SYMBOL(device_private_key);
diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 87ab9b8f56b5..6d5911673450 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1185,7 +1185,7 @@ static int do_huge_pmd_wp_page_fallback(struct vm_fault *vmf, pmd_t orig_pmd,
 	 * mmu_notifier_invalidate_range_end() happens which can lead to a
 	 * device seeing memory write in different order than CPU.
 	 *
-	 * See Documentation/vm/mmu_notifier.txt
+	 * See Documentation/vm/mmu_notifier.rst
 	 */
 	pmdp_huge_clear_flush_notify(vma, haddr, vmf->pmd);
 
@@ -2037,7 +2037,7 @@ static void __split_huge_zero_page_pmd(struct vm_area_struct *vma,
 	 * replacing a zero pmd write protected page with a zero pte write
 	 * protected page.
 	 *
-	 * See Documentation/vm/mmu_notifier.txt
+	 * See Documentation/vm/mmu_notifier.rst
 	 */
 	pmdp_huge_clear_flush(vma, haddr, pmd);
 
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 7c204e3d132b..5af974abae46 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -3289,7 +3289,7 @@ int copy_hugetlb_page_range(struct mm_struct *dst, struct mm_struct *src,
 			 * table protection not changing it to point
 			 * to a new page.
 			 *
-			 * See Documentation/vm/mmu_notifier.txt
+			 * See Documentation/vm/mmu_notifier.rst
 			 */
 			huge_ptep_set_wrprotect(src, addr, src_pte);
 		}
@@ -4355,7 +4355,7 @@ unsigned long hugetlb_change_protection(struct vm_area_struct *vma,
 	 * No need to call mmu_notifier_invalidate_range() we are downgrading
 	 * page table protection not changing it to point to a new page.
 	 *
-	 * See Documentation/vm/mmu_notifier.txt
+	 * See Documentation/vm/mmu_notifier.rst
 	 */
 	i_mmap_unlock_write(vma->vm_file->f_mapping);
 	mmu_notifier_invalidate_range_end(mm, start, end);
diff --git a/mm/ksm.c b/mm/ksm.c
--- a/mm/ksm.c
+++ b/mm/ksm.c
@@ -1049,7 +1049,7 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
 		 * No need to notify as we are downgrading page table to read
 		 * only not changing it to point to a new page.
 		 *
-		 * See Documentation/vm/mmu_notifier.txt
+		 * See Documentation/vm/mmu_notifier.rst
 		 */
 		entry = ptep_clear_flush(vma, pvmw.address, pvmw.pte);
 		/*
@@ -1138,7 +1138,7 @@ static int replace_page(struct vm_area_struct *vma, struct page *page,
 	 * No need to notify as we are replacing a read only page with another
 	 * read only page with the same content.
 	 *
-	 * See Documentation/vm/mmu_notifier.txt
+	 * See Documentation/vm/mmu_notifier.rst
 	 */
 	ptep_clear_flush(vma, addr, ptep);
 	set_pte_at_notify(mm, addr, ptep, newpte);
diff --git a/mm/mmap.c b/mm/mmap.c
index 9efdc021ad22..39fc51d1639c 100644
--- a/mm/mmap.c
+++ b/mm/mmap.c
@@ -2769,7 +2769,7 @@ SYSCALL_DEFINE5(remap_file_pages, unsigned long, start, unsigned long, size,
 	unsigned long ret = -EINVAL;
 	struct file *file;
 
-	pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. See Documentation/vm/remap_file_pages.txt.\n",
+	pr_warn_once("%s (%d) uses deprecated remap_file_pages() syscall. See Documentation/vm/remap_file_pages.rst.\n",
 		     current->comm, current->pid);
 
 	if (prot)
diff --git a/mm/rmap.c b/mm/rmap.c
index 47db27f8049e..854b703fbe2a 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -942,7 +942,7 @@ static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma,
 		 * downgrading page table protection not changing it to point
 		 * to a new page.
 		 *
-		 * See Documentation/vm/mmu_notifier.txt
+		 * See Documentation/vm/mmu_notifier.rst
 		 */
 		if (ret)
 			(*cleaned)++;
@@ -1587,7 +1587,7 @@ static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma,
 			 * point at new page while a device still is using this
 			 * page.
 			 *
-			 * See Documentation/vm/mmu_notifier.txt
+			 * See Documentation/vm/mmu_notifier.rst
 			 */
 			dec_mm_counter(mm, mm_counter_file(page));
 		}
@@ -1597,7 +1597,7 @@ discard:
 		 * done above for all cases requiring it to happen under page
 		 * table lock before mmu_notifier_invalidate_range_end()
 		 *
-		 * See Documentation/vm/mmu_notifier.txt
+		 * See Documentation/vm/mmu_notifier.rst
 		 */
 		page_remove_rmap(subpage, PageHuge(page));
 		put_page(page);
diff --git a/mm/util.c b/mm/util.c
index c1250501364f..e857c80c6f4a 100644
--- a/mm/util.c
+++ b/mm/util.c
@@ -609,7 +609,7 @@ EXPORT_SYMBOL_GPL(vm_memory_committed);
  * succeed and -ENOMEM implies there is not.
  *
  * We currently support three overcommit policies, which are set via the
- * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting
+ * vm.overcommit_memory sysctl.  See Documentation/vm/overcommit-accounting.rst
  *
  * Strict overcommit modes added 2002 Feb 26 by Alan Cox.
 * Additional code		2002 Jul 20 by Robert Love.
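
The CONFIG_KSM help text touched in the first hunk describes how merging is actually enabled at run time: an application marks a range with madvise(MADV_MERGEABLE), and root writes 1 to /sys/kernel/mm/ksm/run. The sketch below is not part of this patch; it is a minimal userspace illustration of that opt-in (buffer size and fill pattern are made up for the example):

#define _GNU_SOURCE
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

int main(void)
{
	size_t len = 64 * 4096;		/* 64 anonymous pages */
	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
			 MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
	if (buf == MAP_FAILED) {
		perror("mmap");
		return 1;
	}

	memset(buf, 0x5a, len);		/* identical page contents, so KSM can merge them */

	/* Register the range with KSM; merging only starts once root has
	 * written 1 to /sys/kernel/mm/ksm/run (and CONFIG_KSM is enabled). */
	if (madvise(buf, len, MADV_MERGEABLE) != 0)
		perror("madvise(MADV_MERGEABLE)");

	munmap(buf, len);
	return 0;
}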