authorMiaohe Lin <linmiaohe@huawei.com>2020-10-15 20:09:58 -0700
committerLinus Torvalds <torvalds@linux-foundation.org>2020-10-16 11:11:19 -0700
commit0e9aa6755757ea5e06e6130d5e50a93442456499 (patch)
tree5833de43fd9ec033507d114291b1266abe43375b /mm
parented0173733dd468883198c3136284394320b8fad6 (diff)
mm: fix some broken comments
Fix some broken comments, including typos, grammar errors, and a wrong function name.

Signed-off-by: Miaohe Lin <linmiaohe@huawei.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Link: https://lkml.kernel.org/r/20200913095456.54873-1-linmiaohe@huawei.com
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Diffstat (limited to 'mm')
-rw-r--r--mm/filemap.c4
-rw-r--r--mm/swap_state.c2
2 files changed, 3 insertions, 3 deletions
diff --git a/mm/filemap.c b/mm/filemap.c
index 8aad142ec4be..45e564a20f8e 100644
--- a/mm/filemap.c
+++ b/mm/filemap.c
@@ -1445,7 +1445,7 @@ static inline bool clear_bit_unlock_is_negative_byte(long nr, volatile void *mem
* unlock_page - unlock a locked page
* @page: the page
*
- * Unlocks the page and wakes up sleepers in ___wait_on_page_locked().
+ * Unlocks the page and wakes up sleepers in wait_on_page_locked().
* Also wakes sleepers in wait_on_page_writeback() because the wakeup
* mechanism between PageLocked pages and PageWriteback pages is shared.
* But that's OK - sleepers in wait_on_page_writeback() just go back to sleep.
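For context, a minimal sketch of the waiter/waker pairing the fixed comment refers to; lock_page(), unlock_page(), wait_on_page_locked() and PageLocked() are the real <linux/pagemap.h> interfaces, while the two helper functions are hypothetical and not part of this patch:

#include <linux/pagemap.h>

/* Hypothetical helpers illustrating the shared page wait-queue. */
static void lock_holder(struct page *page)
{
	lock_page(page);	/* may sleep until the lock is free */
	/* ... fill or otherwise operate on the locked page ... */
	unlock_page(page);	/* wakes sleepers in wait_on_page_locked() */
}

static void lock_waiter(struct page *page)
{
	wait_on_page_locked(page);	/* sleeps while PageLocked(page) */
	/* the lock was dropped; re-check page state before using it */
}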
@@ -3004,7 +3004,7 @@ filler:
goto out;

/*
- * Page is not up to date and may be locked due one of the following
+ * Page is not up to date and may be locked due to one of the following
* case a: Page is being filled and the page lock is held
* case b: Read/write error clearing the page uptodate status
* case c: Truncation in progress (page locked)
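A sketch of the recheck pattern the cases above motivate, assuming the usual wait-then-retest flow of do_read_cache_page(); wait_until_uptodate() is a hypothetical name, not code from this commit:

#include <linux/pagemap.h>

/* Hypothetical helper: wait out cases a-c, then re-test the page. */
static struct page *wait_until_uptodate(struct page *page)
{
	if (PageUptodate(page))
		return page;		/* already filled, nothing to wait for */

	wait_on_page_locked(page);	/* fill, error clearing or truncation done */
	if (PageUptodate(page))
		return page;		/* case a completed successfully */

	return NULL;			/* case b or c: caller retries or fails */
}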
diff --git a/mm/swap_state.c b/mm/swap_state.c
index aa40e706604c..ee465827420e 100644
--- a/mm/swap_state.c
+++ b/mm/swap_state.c
@@ -246,7 +246,7 @@ int add_to_swap(struct page *page)
goto fail;
/*
* Normally the page will be dirtied in unmap because its pte should be
- * dirty. A special case is MADV_FREE page. The page'e pte could have
+ * dirty. A special case is MADV_FREE page. The page's pte could have
* dirty bit cleared but the page's SwapBacked bit is still set because
* clearing the dirty bit and SwapBacked bit has no lock protected. For
* such page, unmap will not set dirty bit for it, so page reclaim will
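And a sketch of the MADV_FREE special case this comment describes; PageDirty(), PageSwapBacked() and set_page_dirty() are the real page-flag interfaces, while handle_lazyfree_page() is a hypothetical helper for illustration only:

#include <linux/mm.h>
#include <linux/page-flags.h>

/* Hypothetical helper: redirty a lazily-freed page so reclaim swaps it out. */
static void handle_lazyfree_page(struct page *page)
{
	/*
	 * MADV_FREE clears the pte dirty bit and PG_swapbacked without a
	 * common lock, so a page can be seen with a clean pte while
	 * SwapBacked is still set. Dirty it explicitly so reclaim writes
	 * it to swap instead of discarding the data.
	 */
	if (!PageDirty(page) && PageSwapBacked(page))
		set_page_dirty(page);
}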