Diffstat (limited to 'mm')
-rw-r--r--  mm/page-writeback.c  2
-rw-r--r--  mm/page_alloc.c      2
-rw-r--r--  mm/percpu.c          2
-rw-r--r--  mm/rmap.c            2
-rw-r--r--  mm/sparse-vmemmap.c  2
5 files changed, 5 insertions, 5 deletions
diff --git a/mm/page-writeback.c b/mm/page-writeback.c
index b4edfe7ce06c..b5d8a1f820a0 100644
--- a/mm/page-writeback.c
+++ b/mm/page-writeback.c
@@ -404,7 +404,7 @@ unsigned long determine_dirtyable_memory(void)
  * - vm.dirty_background_ratio or vm.dirty_background_bytes
  * - vm.dirty_ratio or vm.dirty_bytes
  * The dirty limits will be lifted by 1/4 for PF_LESS_THROTTLE (ie. nfsd) and
- * runtime tasks.
+ * real-time tasks.
  */
 void global_dirty_limits(unsigned long *pbackground, unsigned long *pdirty)
 {
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index ff7e15872398..826ba6922e84 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -4014,7 +4014,7 @@ static void __init setup_usemap(struct pglist_data *pgdat,
 		zone->pageblock_flags = alloc_bootmem_node(pgdat, usemapsize);
 }
 #else
-static void inline setup_usemap(struct pglist_data *pgdat,
+static inline void setup_usemap(struct pglist_data *pgdat,
 				struct zone *zone, unsigned long zonesize) {}
 #endif /* CONFIG_SPARSEMEM */
 
diff --git a/mm/percpu.c b/mm/percpu.c
index 3dd4984bdef8..3f930018aa60 100644
--- a/mm/percpu.c
+++ b/mm/percpu.c
@@ -258,7 +258,7 @@ static void __maybe_unused pcpu_next_pop(struct pcpu_chunk *chunk,
 
 /*
  * (Un)populated page region iterators.  Iterate over (un)populated
- * page regions betwen @start and @end in @chunk.  @rs and @re should
+ * page regions between @start and @end in @chunk.  @rs and @re should
  * be integer variables and will be set to start and end page index of
  * the current region.
  */
diff --git a/mm/rmap.c b/mm/rmap.c
index 1a8bf76bfd03..c95d2ba27a0b 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -94,7 +94,7 @@ static void anon_vma_chain_free(struct anon_vma_chain *anon_vma_chain)
  * anonymous pages mapped into it with that anon_vma.
  *
  * The common case will be that we already have one, but if
- * if not we either need to find an adjacent mapping that we
+ * not we either need to find an adjacent mapping that we
  * can re-use the anon_vma from (very common when the only
  * reason for splitting a vma has been mprotect()), or we
  * allocate a new one.
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index 29d6cbffb283..64b984091edb 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -9,7 +9,7 @@
  *
  * However, virtual mappings need a page table and TLBs. Many Linux
  * architectures already map their physical space using 1-1 mappings
- * via TLBs. For those arches the virtual memmory map is essentially
+ * via TLBs. For those arches the virtual memory map is essentially
  * for free if we use the same page size as the 1-1 mappings. In that
  * case the overhead consists of a few additional pages that are
  * allocated to create a view of memory for vmemmap.