Diffstat (limited to 'arch/s390/kernel/setup.c')
-rw-r--r--  arch/s390/kernel/setup.c  152
1 file changed, 48 insertions, 104 deletions
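At a glance, the patch replaces the separate mem_detect, initrd_data, pgalloc_pos/pgalloc_end/pgalloc_low and __amode31_base boot variables with a single struct physmem_info handed over by the decompressor, queries boot-time reservations by type (RR_VMEM, RR_INITRD, RR_AMODE31, RR_MEM_DETECT_EXTENDED), exposes __kaslr_enabled, and consolidates early stack allocation into stack_alloc_early(). A minimal consumer-side sketch of the new lookup pattern follows; it is not part of the patch, and it assumes (from the uses in the hunks below) that get_physmem_reserved() fills addr/size and returns non-zero only when a range of the requested type exists.

/*
 * Sketch only: typed lookup of a boot-time reserved range, mirroring
 * reserve_initrd() and reserve_physmem_info() in the hunks below.
 */
static void __init reserve_range_sketch(void)
{
	unsigned long addr, size;

	/* assumed semantics: non-zero return iff the range was recorded */
	if (get_physmem_reserved(RR_INITRD, &addr, &size))
		memblock_reserve(addr, size);
}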
diff --git a/arch/s390/kernel/setup.c b/arch/s390/kernel/setup.c
index 8ec5cdf9dadc..fe10da1a271e 100644
--- a/arch/s390/kernel/setup.c
+++ b/arch/s390/kernel/setup.c
@@ -74,7 +74,7 @@
#include <asm/numa.h>
#include <asm/alternative.h>
#include <asm/nospec-branch.h>
-#include <asm/mem_detect.h>
+#include <asm/physmem_info.h>
#include <asm/maccess.h>
#include <asm/uv.h>
#include <asm/asm-offsets.h>
@@ -147,14 +147,10 @@ static u32 __amode31_ref *__ctl_duct = __ctl_duct_amode31;
int __bootdata(noexec_disabled);
unsigned long __bootdata(ident_map_size);
-struct mem_detect_info __bootdata(mem_detect);
-struct initrd_data __bootdata(initrd_data);
-unsigned long __bootdata(pgalloc_pos);
-unsigned long __bootdata(pgalloc_end);
-unsigned long __bootdata(pgalloc_low);
+struct physmem_info __bootdata(physmem_info);
unsigned long __bootdata_preserved(__kaslr_offset);
-unsigned long __bootdata(__amode31_base);
+int __bootdata_preserved(__kaslr_enabled);
unsigned int __bootdata_preserved(zlib_dfltcc_support);
EXPORT_SYMBOL(zlib_dfltcc_support);
u64 __bootdata_preserved(stfle_fac_list[16]);
@@ -385,39 +381,27 @@ void stack_free(unsigned long stack)
#endif
}
-int __init arch_early_irq_init(void)
+void __init __noreturn arch_call_rest_init(void)
{
- unsigned long stack;
-
- stack = __get_free_pages(GFP_KERNEL, THREAD_SIZE_ORDER);
- if (!stack)
- panic("Couldn't allocate async stack");
- S390_lowcore.async_stack = stack + STACK_INIT_OFFSET;
- return 0;
+ smp_reinit_ipl_cpu();
+ rest_init();
}
-void __init arch_call_rest_init(void)
+static unsigned long __init stack_alloc_early(void)
{
unsigned long stack;
- smp_reinit_ipl_cpu();
- stack = stack_alloc();
- if (!stack)
- panic("Couldn't allocate kernel stack");
- current->stack = (void *) stack;
-#ifdef CONFIG_VMAP_STACK
- current->stack_vm_area = (void *) stack;
-#endif
- set_task_stack_end_magic(current);
- stack += STACK_INIT_OFFSET;
- S390_lowcore.kernel_stack = stack;
- call_on_stack_noreturn(rest_init, stack);
+ stack = (unsigned long)memblock_alloc(THREAD_SIZE, THREAD_SIZE);
+ if (!stack) {
+ panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
+ __func__, THREAD_SIZE, THREAD_SIZE);
+ }
+ return stack;
}
static void __init setup_lowcore(void)
{
struct lowcore *lc, *abs_lc;
- unsigned long mcck_stack;
/*
* Setup lowcore for boot cpu
@@ -441,8 +425,6 @@ static void __init setup_lowcore(void)
lc->io_new_psw.mask = PSW_KERNEL_BITS | PSW_MASK_MCHECK;
lc->io_new_psw.addr = (unsigned long) io_int_handler;
lc->clock_comparator = clock_comparator_max;
- lc->nodat_stack = ((unsigned long) &init_thread_union)
- + THREAD_SIZE - STACK_FRAME_OVERHEAD - sizeof(struct pt_regs);
lc->current_task = (unsigned long)&init_task;
lc->lpp = LPP_MAGIC;
lc->machine_flags = S390_lowcore.machine_flags;
@@ -455,17 +437,15 @@ static void __init setup_lowcore(void)
lc->steal_timer = S390_lowcore.steal_timer;
lc->last_update_timer = S390_lowcore.last_update_timer;
lc->last_update_clock = S390_lowcore.last_update_clock;
-
/*
* Allocate the global restart stack which is the same for
- * all CPUs in cast *one* of them does a PSW restart.
+ * all CPUs in case *one* of them does a PSW restart.
*/
- restart_stack = memblock_alloc(THREAD_SIZE, THREAD_SIZE);
- if (!restart_stack)
- panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
- __func__, THREAD_SIZE, THREAD_SIZE);
- restart_stack += STACK_INIT_OFFSET;
-
+ restart_stack = (void *)(stack_alloc_early() + STACK_INIT_OFFSET);
+ lc->mcck_stack = stack_alloc_early() + STACK_INIT_OFFSET;
+ lc->async_stack = stack_alloc_early() + STACK_INIT_OFFSET;
+ lc->nodat_stack = stack_alloc_early() + STACK_INIT_OFFSET;
+ lc->kernel_stack = S390_lowcore.kernel_stack;
/*
* Set up PSW restart to call ipl.c:do_restart(). Copy the relevant
* restart data to the absolute zero lowcore. This is necessary if
@@ -476,13 +456,6 @@ static void __init setup_lowcore(void)
lc->restart_data = 0;
lc->restart_source = -1U;
__ctl_store(lc->cregs_save_area, 0, 15);
-
- mcck_stack = (unsigned long)memblock_alloc(THREAD_SIZE, THREAD_SIZE);
- if (!mcck_stack)
- panic("%s: Failed to allocate %lu bytes align=0x%lx\n",
- __func__, THREAD_SIZE, THREAD_SIZE);
- lc->mcck_stack = mcck_stack + STACK_INIT_OFFSET;
-
lc->spinlock_lockval = arch_spin_lockval(0);
lc->spinlock_index = 0;
arch_spin_lock_setup(0);
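With the hunks above, setup_lowcore() now carves the restart, machine check, async and nodat stacks out of stack_alloc_early(), which panics on allocation failure, so the per-stack error handling that used to live here (and in the removed arch_early_irq_init()) is gone. A sketch of the resulting pattern, not part of the patch, assuming STACK_INIT_OFFSET marks the initial stack frame inside the THREAD_SIZE region as in the converted code:

	/* sketch: one THREAD_SIZE aligned region per early stack; the lowcore
	 * stores the top-of-stack pointer, and an allocation failure has
	 * already panicked inside stack_alloc_early() */
	lc->async_stack = stack_alloc_early() + STACK_INIT_OFFSET;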
@@ -635,7 +608,11 @@ static struct notifier_block kdump_mem_nb = {
*/
static void __init reserve_pgtables(void)
{
- memblock_reserve(pgalloc_pos, pgalloc_end - pgalloc_pos);
+ unsigned long start, end;
+ struct reserved_range *range;
+
+ for_each_physmem_reserved_type_range(RR_VMEM, range, &start, &end)
+ memblock_reserve(start, end - start);
}
/*
@@ -712,13 +689,13 @@ static void __init reserve_crashkernel(void)
*/
static void __init reserve_initrd(void)
{
-#ifdef CONFIG_BLK_DEV_INITRD
- if (!initrd_data.start || !initrd_data.size)
+ unsigned long addr, size;
+
+ if (!IS_ENABLED(CONFIG_BLK_DEV_INITRD) || !get_physmem_reserved(RR_INITRD, &addr, &size))
return;
- initrd_start = (unsigned long)__va(initrd_data.start);
- initrd_end = initrd_start + initrd_data.size;
- memblock_reserve(initrd_data.start, initrd_data.size);
-#endif
+ initrd_start = (unsigned long)__va(addr);
+ initrd_end = initrd_start + size;
+ memblock_reserve(addr, size);
}
/*
@@ -730,72 +707,40 @@ static void __init reserve_certificate_list(void)
memblock_reserve(ipl_cert_list_addr, ipl_cert_list_size);
}
-static void __init reserve_mem_detect_info(void)
+static void __init reserve_physmem_info(void)
{
- unsigned long start, size;
+ unsigned long addr, size;
- get_mem_detect_reserved(&start, &size);
- if (size)
- memblock_reserve(start, size);
+ if (get_physmem_reserved(RR_MEM_DETECT_EXTENDED, &addr, &size))
+ memblock_reserve(addr, size);
}
-static void __init free_mem_detect_info(void)
+static void __init free_physmem_info(void)
{
- unsigned long start, size;
-
- get_mem_detect_reserved(&start, &size);
- if (size)
- memblock_phys_free(start, size);
-}
+ unsigned long addr, size;
-static const char * __init get_mem_info_source(void)
-{
- switch (mem_detect.info_source) {
- case MEM_DETECT_SCLP_STOR_INFO:
- return "sclp storage info";
- case MEM_DETECT_DIAG260:
- return "diag260";
- case MEM_DETECT_SCLP_READ_INFO:
- return "sclp read info";
- case MEM_DETECT_BIN_SEARCH:
- return "binary search";
- }
- return "none";
+ if (get_physmem_reserved(RR_MEM_DETECT_EXTENDED, &addr, &size))
+ memblock_phys_free(addr, size);
}
-static void __init memblock_add_mem_detect_info(void)
+static void __init memblock_add_physmem_info(void)
{
unsigned long start, end;
int i;
pr_debug("physmem info source: %s (%hhd)\n",
- get_mem_info_source(), mem_detect.info_source);
+ get_physmem_info_source(), physmem_info.info_source);
/* keep memblock lists close to the kernel */
memblock_set_bottom_up(true);
- for_each_mem_detect_usable_block(i, &start, &end)
+ for_each_physmem_usable_range(i, &start, &end)
memblock_add(start, end - start);
- for_each_mem_detect_block(i, &start, &end)
+ for_each_physmem_online_range(i, &start, &end)
memblock_physmem_add(start, end - start);
memblock_set_bottom_up(false);
memblock_set_node(0, ULONG_MAX, &memblock.memory, 0);
}
/*
- * Check for initrd being in usable memory
- */
-static void __init check_initrd(void)
-{
-#ifdef CONFIG_BLK_DEV_INITRD
- if (initrd_data.start && initrd_data.size &&
- !memblock_is_region_memory(initrd_data.start, initrd_data.size)) {
- pr_err("The initial RAM disk does not fit into the memory\n");
- memblock_phys_free(initrd_data.start, initrd_data.size);
- initrd_start = initrd_end = 0;
- }
-#endif
-}
-
-/*
* Reserve memory used for lowcore/command line/kernel image.
*/
static void __init reserve_kernel(void)
@@ -803,7 +748,7 @@ static void __init reserve_kernel(void)
memblock_reserve(0, STARTUP_NORMAL_OFFSET);
memblock_reserve(OLDMEM_BASE, sizeof(unsigned long));
memblock_reserve(OLDMEM_SIZE, sizeof(unsigned long));
- memblock_reserve(__amode31_base, __eamode31 - __samode31);
+ memblock_reserve(physmem_info.reserved[RR_AMODE31].start, __eamode31 - __samode31);
memblock_reserve(__pa(sclp_early_sccb), EXT_SCCB_READ_SCP);
memblock_reserve(__pa(_stext), _end - _stext);
}
@@ -825,13 +770,13 @@ static void __init setup_memory(void)
static void __init relocate_amode31_section(void)
{
unsigned long amode31_size = __eamode31 - __samode31;
- long amode31_offset = __amode31_base - __samode31;
+ long amode31_offset = physmem_info.reserved[RR_AMODE31].start - __samode31;
long *ptr;
pr_info("Relocating AMODE31 section of size 0x%08lx\n", amode31_size);
/* Move original AMODE31 section to the new one */
- memmove((void *)__amode31_base, (void *)__samode31, amode31_size);
+ memmove((void *)physmem_info.reserved[RR_AMODE31].start, (void *)__samode31, amode31_size);
/* Zero out the old AMODE31 section to catch invalid accesses within it */
memset((void *)__samode31, 0, amode31_size);
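Both reserve_kernel() and relocate_amode31_section() now take the AMODE31 target address from the RR_AMODE31 entry of physmem_info.reserved[] instead of the dropped __amode31_base variable. A small sketch of that access pattern, not part of the patch; only the .start member visible in these hunks is assumed:

	/* sketch: AMODE31 base chosen by the decompressor, published through
	 * the reserved-range table */
	unsigned long amode31_base = physmem_info.reserved[RR_AMODE31].start;
	unsigned long amode31_size = __eamode31 - __samode31;

	memblock_reserve(amode31_base, amode31_size);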
@@ -997,14 +942,14 @@ void __init setup_arch(char **cmdline_p)
reserve_kernel();
reserve_initrd();
reserve_certificate_list();
- reserve_mem_detect_info();
+ reserve_physmem_info();
memblock_set_current_limit(ident_map_size);
memblock_allow_resize();
/* Get information about *all* installed memory */
- memblock_add_mem_detect_info();
+ memblock_add_physmem_info();
- free_mem_detect_info();
+ free_physmem_info();
setup_memory_end();
memblock_dump_all();
setup_memory();
@@ -1017,7 +962,6 @@ void __init setup_arch(char **cmdline_p)
if (MACHINE_HAS_EDAT2)
hugetlb_cma_reserve(PUD_SHIFT - PAGE_SHIFT);
- check_initrd();
reserve_crashkernel();
#ifdef CONFIG_CRASH_DUMP
/*