author		Vasily Gorbik <gor@linux.ibm.com>	2023-01-28 23:55:04 +0100
committer	Heiko Carstens <hca@linux.ibm.com>	2023-02-06 11:13:54 +0100
commit		bf64f0517e5d0d8f3248143fc49535c1d1594b4f
tree		ccfc742dbf1f7d39fd3dbc308bcae23f938040ed /arch/s390/boot
parent		22476f47b6b7fb7d066c71f67ebc11892adb0849
s390/mem_detect: handle online memory limit just once
Introduce mem_detect_truncate() to cut off any online memory ranges above
the established identity mapping size, so that mem_detect users do not have
to repeat this truncation over and over again.
Suggested-by: Alexander Gordeev <agordeev@linux.ibm.com>
Reviewed-by: Alexander Gordeev <agordeev@linux.ibm.com>
Signed-off-by: Vasily Gorbik <gor@linux.ibm.com>
Signed-off-by: Heiko Carstens <hca@linux.ibm.com>
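
The truncation semantics are easiest to see in isolation. Below is a minimal, standalone sketch of the same walk over an ordered list of memory ranges; struct range, the ranges[] array, and truncate_ranges() are simplified stand-ins for illustration, not the kernel's mem_detect structures. Blocks that start at or above the limit are dropped, and a block that straddles the limit is clipped so it ends exactly at the limit:

#include <stdio.h>

/* Simplified stand-in for the boot-time list of online memory blocks. */
struct range {
	unsigned long long start;
	unsigned long long end;		/* exclusive */
};

static struct range ranges[] = {
	{ 0x00000000ULL,  0x40000000ULL },	/* 0G..1G: fully below the limit, kept */
	{ 0x40000000ULL,  0xc0000000ULL },	/* 1G..3G: straddles the limit, clipped */
	{ 0x100000000ULL, 0x140000000ULL },	/* 4G..5G: above the limit, dropped */
};
static int range_count = 3;

/* Same shape as mem_detect_truncate(): walk the ordered list once,
 * cut the count at the first block starting at/above the limit, or
 * clip the block that crosses the limit and stop there. */
static void truncate_ranges(unsigned long long limit)
{
	int i;

	for (i = 0; i < range_count; i++) {
		if (ranges[i].start >= limit) {
			range_count = i;
			break;
		} else if (ranges[i].end > limit) {
			ranges[i].end = limit;
			range_count = i + 1;
			break;
		}
	}
}

int main(void)
{
	int i;

	/* Pretend the established identity mapping size is 2G. */
	truncate_ranges(0x80000000ULL);
	for (i = 0; i < range_count; i++)
		printf("0x%llx-0x%llx\n", ranges[i].start, ranges[i].end);
	/* Prints 0x0-0x40000000 and 0x40000000-0x80000000. */
	return 0;
}

Once the list has been truncated a single time in startup_kernel(), callers such as get_random_base() and setup_vmem() can iterate the blocks directly without clamping each one against ident_map_size, which is exactly what the diff below removes.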
Diffstat (limited to 'arch/s390/boot')
-rw-r--r--	arch/s390/boot/boot.h		 3
-rw-r--r--	arch/s390/boot/kaslr.c		 2
-rw-r--r--	arch/s390/boot/mem_detect.c	18
-rw-r--r--	arch/s390/boot/startup.c	 3
-rw-r--r--	arch/s390/boot/vmem.c		15
5 files changed, 28 insertions, 13 deletions
diff --git a/arch/s390/boot/boot.h b/arch/s390/boot/boot.h
index 939d27da8fbd..e91bbb004efb 100644
--- a/arch/s390/boot/boot.h
+++ b/arch/s390/boot/boot.h
@@ -34,6 +34,7 @@ struct vmlinux_info {
 
 void startup_kernel(void);
 unsigned long detect_memory(unsigned long *safe_addr);
+void mem_detect_truncate(unsigned long limit);
 bool is_ipl_block_dump(void);
 void store_ipl_parmblock(void);
 unsigned long read_ipl_report(unsigned long safe_addr);
@@ -44,7 +45,7 @@ void print_missing_facilities(void);
 
 void sclp_early_setup_buffer(void);
 void print_pgm_check_info(void);
 unsigned long get_random_base(unsigned long safe_addr);
-void setup_vmem(unsigned long ident_map_size, unsigned long asce_limit);
+void setup_vmem(unsigned long asce_limit);
 void __printf(1, 2) decompressor_printk(const char *fmt, ...);
 void error(char *m);
diff --git a/arch/s390/boot/kaslr.c b/arch/s390/boot/kaslr.c
index 58a8d8c8a100..bbf6860ebc45 100644
--- a/arch/s390/boot/kaslr.c
+++ b/arch/s390/boot/kaslr.c
@@ -176,8 +176,6 @@ unsigned long get_random_base(unsigned long safe_addr)
 	unsigned long base_pos, max_pos, kernel_size;
 	int i;
 
-	memory_limit = min(memory_limit, ident_map_size);
-
 	/*
 	 * Avoid putting kernel in the end of physical memory
 	 * which kasan will use for shadow memory and early pgtable
diff --git a/arch/s390/boot/mem_detect.c b/arch/s390/boot/mem_detect.c
index daa159317183..3058d397a9da 100644
--- a/arch/s390/boot/mem_detect.c
+++ b/arch/s390/boot/mem_detect.c
@@ -171,3 +171,21 @@ unsigned long detect_memory(unsigned long *safe_addr)
 
 	return max_physmem_end;
 }
+
+void mem_detect_truncate(unsigned long limit)
+{
+	struct mem_detect_block *block;
+	int i;
+
+	for (i = 0; i < mem_detect.count; i++) {
+		block = __get_mem_detect_block_ptr(i);
+		if (block->start >= limit) {
+			mem_detect.count = i;
+			break;
+		} else if (block->end > limit) {
+			block->end = (u64)limit;
+			mem_detect.count = i + 1;
+			break;
+		}
+	}
+}
diff --git a/arch/s390/boot/startup.c b/arch/s390/boot/startup.c
index 577ebec9971b..89beb31e982a 100644
--- a/arch/s390/boot/startup.c
+++ b/arch/s390/boot/startup.c
@@ -305,6 +305,7 @@ void startup_kernel(void)
 	setup_ident_map_size(max_physmem_end);
 	setup_vmalloc_size();
 	asce_limit = setup_kernel_memory_layout();
+	mem_detect_truncate(ident_map_size);
 
 	if (IS_ENABLED(CONFIG_RANDOMIZE_BASE) && kaslr_enabled) {
 		random_lma = get_random_base(safe_addr);
@@ -336,7 +337,7 @@ void startup_kernel(void)
 	 */
 	clear_bss_section();
 	handle_relocs(__kaslr_offset);
-	setup_vmem(ident_map_size, asce_limit);
+	setup_vmem(asce_limit);
 
 	copy_bootdata();
 	if (__kaslr_offset) {
diff --git a/arch/s390/boot/vmem.c b/arch/s390/boot/vmem.c
index a35c251c9123..82ef57827042 100644
--- a/arch/s390/boot/vmem.c
+++ b/arch/s390/boot/vmem.c
@@ -39,7 +39,7 @@ static void boot_check_oom(void)
 		error("out of memory on boot\n");
 }
 
-static void pgtable_populate_init(unsigned long ident_map_size)
+static void pgtable_populate_init(void)
 {
 	unsigned long initrd_end;
 	unsigned long kernel_end;
@@ -51,7 +51,7 @@ static void pgtable_populate_init(unsigned long ident_map_size)
 		pgalloc_low = max(pgalloc_low, initrd_end);
 	}
 
-	pgalloc_end = round_down(min(ident_map_size, get_mem_detect_end()), PAGE_SIZE);
+	pgalloc_end = round_down(get_mem_detect_end(), PAGE_SIZE);
 	pgalloc_pos = pgalloc_end;
 
 	boot_check_oom();
@@ -226,7 +226,7 @@ static void pgtable_populate(unsigned long addr, unsigned long end, enum populat
 	}
 }
 
-void setup_vmem(unsigned long ident_map_size, unsigned long asce_limit)
+void setup_vmem(unsigned long asce_limit)
 {
 	unsigned long start, end;
 	unsigned long asce_type;
@@ -250,13 +250,10 @@ void setup_vmem(unsigned long ident_map_size, unsigned long asce_limit)
 	 * To prevent creation of a large page at address 0 first map
 	 * the lowcore and create the identity mapping only afterwards.
	 */
-	pgtable_populate_init(ident_map_size);
+	pgtable_populate_init();
 	pgtable_populate(0, sizeof(struct lowcore), POPULATE_ONE2ONE);
-	for_each_mem_detect_block(i, &start, &end) {
-		if (start >= ident_map_size)
-			break;
-		pgtable_populate(start, min(end, ident_map_size), POPULATE_ONE2ONE);
-	}
+	for_each_mem_detect_block(i, &start, &end)
+		pgtable_populate(start, end, POPULATE_ONE2ONE);
 	pgtable_populate(__abs_lowcore, __abs_lowcore + sizeof(struct lowcore),
			 POPULATE_ABS_LOWCORE);
 	pgtable_populate(__memcpy_real_area, __memcpy_real_area + PAGE_SIZE,