Diffstat (limited to 'arch/powerpc/kexec')
-rw-r--r--  arch/powerpc/kexec/Makefile       |   4
-rw-r--r--  arch/powerpc/kexec/core.c         | 118
-rw-r--r--  arch/powerpc/kexec/core_32.c      |   1
-rw-r--r--  arch/powerpc/kexec/core_64.c      | 123
-rw-r--r--  arch/powerpc/kexec/crash.c        | 195
-rw-r--r--  arch/powerpc/kexec/elf_64.c       |  15
-rw-r--r--  arch/powerpc/kexec/file_load_64.c | 656
-rw-r--r--  arch/powerpc/kexec/ranges.c       | 312
-rw-r--r--  arch/powerpc/kexec/relocate_32.S  |   7
9 files changed, 754 insertions, 677 deletions
diff --git a/arch/powerpc/kexec/Makefile b/arch/powerpc/kexec/Makefile
index 8e469c4da3f8..470eb0453e17 100644
--- a/arch/powerpc/kexec/Makefile
+++ b/arch/powerpc/kexec/Makefile
@@ -3,11 +3,11 @@
# Makefile for the linux kernel.
#
-obj-y += core.o core_$(BITS).o
+obj-y += core.o core_$(BITS).o ranges.o
obj-$(CONFIG_PPC32) += relocate_32.o
-obj-$(CONFIG_KEXEC_FILE) += file_load.o ranges.o file_load_$(BITS).o elf_$(BITS).o
+obj-$(CONFIG_KEXEC_FILE) += file_load.o file_load_$(BITS).o elf_$(BITS).o
obj-$(CONFIG_VMCORE_INFO) += vmcore_info.o
obj-$(CONFIG_CRASH_DUMP) += crash.o
diff --git a/arch/powerpc/kexec/core.c b/arch/powerpc/kexec/core.c
index b8333a49ea5d..00e9c267b912 100644
--- a/arch/powerpc/kexec/core.c
+++ b/arch/powerpc/kexec/core.c
@@ -22,28 +22,6 @@
#include <asm/setup.h>
#include <asm/firmware.h>
-void machine_kexec_mask_interrupts(void) {
- unsigned int i;
- struct irq_desc *desc;
-
- for_each_irq_desc(i, desc) {
- struct irq_chip *chip;
-
- chip = irq_desc_get_chip(desc);
- if (!chip)
- continue;
-
- if (chip->irq_eoi && irqd_irq_inprogress(&desc->irq_data))
- chip->irq_eoi(&desc->irq_data);
-
- if (chip->irq_mask)
- chip->irq_mask(&desc->irq_data);
-
- if (chip->irq_disable && !irqd_irq_disabled(&desc->irq_data))
- chip->irq_disable(&desc->irq_data);
- }
-}
-
#ifdef CONFIG_CRASH_DUMP
void machine_crash_shutdown(struct pt_regs *regs)
{
@@ -80,38 +58,20 @@ void machine_kexec(struct kimage *image)
}
#ifdef CONFIG_CRASH_RESERVE
-void __init reserve_crashkernel(void)
-{
- unsigned long long crash_size, crash_base, total_mem_sz;
- int ret;
-
- total_mem_sz = memory_limit ? memory_limit : memblock_phys_mem_size();
- /* use common parsing */
- ret = parse_crashkernel(boot_command_line, total_mem_sz,
- &crash_size, &crash_base, NULL, NULL);
- if (ret == 0 && crash_size > 0) {
- crashk_res.start = crash_base;
- crashk_res.end = crash_base + crash_size - 1;
- }
-
- if (crashk_res.end == crashk_res.start) {
- crashk_res.start = crashk_res.end = 0;
- return;
- }
- /* We might have got these values via the command line or the
- * device tree, either way sanitise them now. */
-
- crash_size = resource_size(&crashk_res);
+static unsigned long long __init get_crash_base(unsigned long long crash_base)
+{
#ifndef CONFIG_NONSTATIC_KERNEL
- if (crashk_res.start != KDUMP_KERNELBASE)
+ if (crash_base != KDUMP_KERNELBASE)
printk("Crash kernel location must be 0x%x\n",
KDUMP_KERNELBASE);
- crashk_res.start = KDUMP_KERNELBASE;
+ return KDUMP_KERNELBASE;
#else
- if (!crashk_res.start) {
+ unsigned long long crash_base_align;
+
+ if (!crash_base) {
#ifdef CONFIG_PPC64
/*
* On the LPAR platform place the crash kernel to mid of
@@ -123,53 +83,51 @@ void __init reserve_crashkernel(void)
* kernel starts at 128MB offset on other platforms.
*/
if (firmware_has_feature(FW_FEATURE_LPAR))
- crashk_res.start = min_t(u64, ppc64_rma_size / 2, SZ_512M);
+ crash_base = min_t(u64, ppc64_rma_size / 2, SZ_512M);
else
- crashk_res.start = min_t(u64, ppc64_rma_size / 2, SZ_128M);
+ crash_base = min_t(u64, ppc64_rma_size / 2, SZ_128M);
#else
- crashk_res.start = KDUMP_KERNELBASE;
+ crash_base = KDUMP_KERNELBASE;
#endif
}
- crash_base = PAGE_ALIGN(crashk_res.start);
- if (crash_base != crashk_res.start) {
- printk("Crash kernel base must be aligned to 0x%lx\n",
- PAGE_SIZE);
- crashk_res.start = crash_base;
- }
+ crash_base_align = PAGE_ALIGN(crash_base);
+ if (crash_base != crash_base_align)
+ pr_warn("Crash kernel base must be aligned to 0x%lx\n", PAGE_SIZE);
+ return crash_base_align;
#endif
- crash_size = PAGE_ALIGN(crash_size);
- crashk_res.end = crashk_res.start + crash_size - 1;
+}
- /* The crash region must not overlap the current kernel */
- if (overlaps_crashkernel(__pa(_stext), _end - _stext)) {
- printk(KERN_WARNING
- "Crash kernel can not overlap current kernel\n");
- crashk_res.start = crashk_res.end = 0;
+void __init arch_reserve_crashkernel(void)
+{
+ unsigned long long crash_size, crash_base, crash_end;
+ unsigned long long kernel_start, kernel_size;
+ unsigned long long total_mem_sz;
+ int ret;
+
+ total_mem_sz = memory_limit ? memory_limit : memblock_phys_mem_size();
+
+ /* use common parsing */
+ ret = parse_crashkernel(boot_command_line, total_mem_sz, &crash_size,
+ &crash_base, NULL, NULL);
+
+ if (ret)
return;
- }
- /* Crash kernel trumps memory limit */
- if (memory_limit && memory_limit <= crashk_res.end) {
- memory_limit = crashk_res.end + 1;
- total_mem_sz = memory_limit;
- printk("Adjusted memory limit for crashkernel, now 0x%llx\n",
- memory_limit);
- }
+ crash_base = get_crash_base(crash_base);
+ crash_end = crash_base + crash_size - 1;
- printk(KERN_INFO "Reserving %ldMB of memory at %ldMB "
- "for crashkernel (System RAM: %ldMB)\n",
- (unsigned long)(crash_size >> 20),
- (unsigned long)(crashk_res.start >> 20),
- (unsigned long)(total_mem_sz >> 20));
+ kernel_start = __pa(_stext);
+ kernel_size = _end - _stext;
- if (!memblock_is_region_memory(crashk_res.start, crash_size) ||
- memblock_reserve(crashk_res.start, crash_size)) {
- pr_err("Failed to reserve memory for crashkernel!\n");
- crashk_res.start = crashk_res.end = 0;
+ /* The crash region must not overlap the current kernel */
+ if ((kernel_start + kernel_size > crash_base) && (kernel_start <= crash_end)) {
+ pr_warn("Crash kernel can not overlap current kernel\n");
return;
}
+
+ reserve_crashkernel_generic(crash_size, crash_base, 0, false);
}
int __init overlaps_crashkernel(unsigned long start, unsigned long size)
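
The reworked arch_reserve_crashkernel() above parses crashkernel=, picks a default base (half of the RMA, capped at 512MB on LPAR or 128MB elsewhere), page-aligns it, refuses to overlap the running kernel, and then hands the reservation to reserve_crashkernel_generic(). Below is a minimal user-space sketch of just the default-base and alignment step; PAGE_SIZE, SZ_128M/SZ_512M and the sample RMA size are illustrative stand-ins for values the kernel derives from the platform.

#include <stdio.h>
#include <stdint.h>

/* Illustrative constants; the kernel derives these from the platform. */
#define PAGE_SIZE     0x10000ULL          /* 64K pages, typical on ppc64 */
#define SZ_128M       0x08000000ULL
#define SZ_512M       0x20000000ULL

#define MIN(a, b)     ((a) < (b) ? (a) : (b))
#define PAGE_ALIGN(x) (((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

/* Mirrors the default-base selection in get_crash_base() for the
 * NONSTATIC_KERNEL/PPC64 case: mid of the RMA, capped per platform. */
static uint64_t pick_default_base(uint64_t rma_size, int is_lpar)
{
	return MIN(rma_size / 2, is_lpar ? SZ_512M : SZ_128M);
}

int main(void)
{
	uint64_t rma_size = 0x30000000ULL;	/* 768M RMA, illustrative */
	uint64_t base = pick_default_base(rma_size, 1);
	uint64_t aligned = PAGE_ALIGN(base);

	if (base != aligned)
		printf("Crash kernel base must be aligned to 0x%llx\n",
		       (unsigned long long)PAGE_SIZE);
	printf("crash base: 0x%llx\n", (unsigned long long)aligned);
	return 0;
}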
diff --git a/arch/powerpc/kexec/core_32.c b/arch/powerpc/kexec/core_32.c
index c95f96850c9e..deb28eb44f30 100644
--- a/arch/powerpc/kexec/core_32.c
+++ b/arch/powerpc/kexec/core_32.c
@@ -7,6 +7,7 @@
* Copyright (C) 2005 IBM Corporation.
*/
+#include <linux/irq.h>
#include <linux/kexec.h>
#include <linux/mm.h>
#include <linux/string.h>
diff --git a/arch/powerpc/kexec/core_64.c b/arch/powerpc/kexec/core_64.c
index 762e4d09aacf..222aa326dace 100644
--- a/arch/powerpc/kexec/core_64.c
+++ b/arch/powerpc/kexec/core_64.c
@@ -17,6 +17,7 @@
#include <linux/cpu.h>
#include <linux/hardirq.h>
#include <linux/of.h>
+#include <linux/libfdt.h>
#include <asm/page.h>
#include <asm/current.h>
@@ -26,10 +27,12 @@
#include <asm/paca.h>
#include <asm/mmu.h>
#include <asm/sections.h> /* _end */
+#include <asm/setup.h>
#include <asm/smp.h>
#include <asm/hw_breakpoint.h>
#include <asm/svm.h>
#include <asm/ultravisor.h>
+#include <asm/crashdump-ppc64.h>
int machine_kexec_prepare(struct kimage *image)
{
@@ -315,6 +318,16 @@ void default_machine_kexec(struct kimage *image)
if (!kdump_in_progress())
kexec_prepare_cpus();
+#ifdef CONFIG_PPC_PSERIES
+ /*
+ * This must be done after other CPUs have shut down, otherwise they
+ * could execute the 'scv' instruction, which is not supported with
+ * reloc disabled (see configure_exceptions()).
+ */
+ if (firmware_has_feature(FW_FEATURE_SET_MODE))
+ pseries_disable_reloc_on_exc();
+#endif
+
printk("kexec: Starting switchover sequence.\n");
/* switch to a staticly allocated stack. Based on irq stack code.
@@ -419,3 +432,113 @@ static int __init export_htab_values(void)
}
late_initcall(export_htab_values);
#endif /* CONFIG_PPC_64S_HASH_MMU */
+
+#if defined(CONFIG_KEXEC_FILE) || defined(CONFIG_CRASH_DUMP)
+/**
+ * add_node_props - Reads node properties from device node structure and add
+ * them to fdt.
+ * @fdt: Flattened device tree of the kernel
+ * @node_offset: offset of the node to add a property at
+ * @dn: device node pointer
+ *
+ * Returns 0 on success, negative errno on error.
+ */
+static int add_node_props(void *fdt, int node_offset, const struct device_node *dn)
+{
+ int ret = 0;
+ struct property *pp;
+
+ if (!dn)
+ return -EINVAL;
+
+ for_each_property_of_node(dn, pp) {
+ ret = fdt_setprop(fdt, node_offset, pp->name, pp->value, pp->length);
+ if (ret < 0) {
+ pr_err("Unable to add %s property: %s\n", pp->name, fdt_strerror(ret));
+ return ret;
+ }
+ }
+ return ret;
+}
+
+/**
+ * update_cpus_node - Update cpus node of flattened device tree using of_root
+ * device node.
+ * @fdt: Flattened device tree of the kernel.
+ *
+ * Returns 0 on success, negative errno on error.
+ *
+ * Note: expecting no subnodes under /cpus/<node> with device_type == "cpu".
+ * If this changes, update this function to include them.
+ */
+int update_cpus_node(void *fdt)
+{
+ int prev_node_offset;
+ const char *device_type;
+ const struct fdt_property *prop;
+ struct device_node *cpus_node, *dn;
+ int cpus_offset, cpus_subnode_offset, ret = 0;
+
+ cpus_offset = fdt_path_offset(fdt, "/cpus");
+ if (cpus_offset < 0 && cpus_offset != -FDT_ERR_NOTFOUND) {
+ pr_err("Malformed device tree: error reading /cpus node: %s\n",
+ fdt_strerror(cpus_offset));
+ return cpus_offset;
+ }
+
+ prev_node_offset = cpus_offset;
+ /* Delete sub-nodes of /cpus node with device_type == "cpu" */
+ for (cpus_subnode_offset = fdt_first_subnode(fdt, cpus_offset); cpus_subnode_offset >= 0;) {
+ /* Ignore nodes that do not have a device_type property or device_type != "cpu" */
+ prop = fdt_get_property(fdt, cpus_subnode_offset, "device_type", NULL);
+ if (!prop || strcmp(prop->data, "cpu")) {
+ prev_node_offset = cpus_subnode_offset;
+ goto next_node;
+ }
+
+ ret = fdt_del_node(fdt, cpus_subnode_offset);
+ if (ret < 0) {
+ pr_err("Failed to delete a cpus sub-node: %s\n", fdt_strerror(ret));
+ return ret;
+ }
+next_node:
+ if (prev_node_offset == cpus_offset)
+ cpus_subnode_offset = fdt_first_subnode(fdt, cpus_offset);
+ else
+ cpus_subnode_offset = fdt_next_subnode(fdt, prev_node_offset);
+ }
+
+ cpus_node = of_find_node_by_path("/cpus");
+ /* Fail here to avoid a kexec/kdump kernel boot hang */
+ if (!cpus_node) {
+ pr_err("No /cpus node found\n");
+ return -EINVAL;
+ }
+
+ /* Add all /cpus sub-nodes of device_type == "cpu" to FDT */
+ for_each_child_of_node(cpus_node, dn) {
+ /* Ignore device nodes that do not have a device_type property
+ * or device_type != "cpu".
+ */
+ device_type = of_get_property(dn, "device_type", NULL);
+ if (!device_type || strcmp(device_type, "cpu"))
+ continue;
+
+ cpus_subnode_offset = fdt_add_subnode(fdt, cpus_offset, dn->full_name);
+ if (cpus_subnode_offset < 0) {
+ pr_err("Unable to add %s subnode: %s\n", dn->full_name,
+ fdt_strerror(cpus_subnode_offset));
+ ret = cpus_subnode_offset;
+ goto out;
+ }
+
+ ret = add_node_props(fdt, cpus_subnode_offset, dn);
+ if (ret < 0)
+ goto out;
+ }
+out:
+ of_node_put(cpus_node);
+ of_node_put(dn);
+ return ret;
+}
+#endif /* CONFIG_KEXEC_FILE || CONFIG_CRASH_DUMP */
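
update_cpus_node() now prunes only the device_type == "cpu" subnodes of /cpus and re-adds them from the live device tree, rather than deleting and recreating the whole /cpus node. Since fdt_del_node() shifts node offsets, the loop re-derives the next offset from the last surviving node (or from /cpus itself). The following is a minimal sketch of that deletion pattern, assuming a writable FDT blob and userspace libfdt; it is illustrative, not the kernel function itself.

#include <string.h>
#include <stdio.h>
#include <libfdt.h>

/* Delete every /cpus subnode whose device_type is "cpu". Offsets become
 * stale after fdt_del_node(), so the next offset is always re-derived
 * from a node that is still valid. */
static int prune_cpu_subnodes(void *fdt)
{
	const struct fdt_property *prop;
	int cpus, node, prev, ret;

	cpus = fdt_path_offset(fdt, "/cpus");
	if (cpus < 0)
		return cpus;

	prev = cpus;
	for (node = fdt_first_subnode(fdt, cpus); node >= 0;) {
		prop = fdt_get_property(fdt, node, "device_type", NULL);
		if (!prop || strcmp(prop->data, "cpu")) {
			prev = node;		/* keep non-cpu nodes */
		} else {
			ret = fdt_del_node(fdt, node);
			if (ret < 0) {
				fprintf(stderr, "del: %s\n", fdt_strerror(ret));
				return ret;
			}
		}
		/* Restart from the parent if nothing before us survived. */
		node = (prev == cpus) ? fdt_first_subnode(fdt, cpus)
				      : fdt_next_subnode(fdt, prev);
	}
	return 0;
}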
diff --git a/arch/powerpc/kexec/crash.c b/arch/powerpc/kexec/crash.c
index ef5c2d25ec39..9ac3266e4965 100644
--- a/arch/powerpc/kexec/crash.c
+++ b/arch/powerpc/kexec/crash.c
@@ -16,6 +16,8 @@
#include <linux/delay.h>
#include <linux/irq.h>
#include <linux/types.h>
+#include <linux/libfdt.h>
+#include <linux/memory.h>
#include <asm/processor.h>
#include <asm/machdep.h>
@@ -24,6 +26,7 @@
#include <asm/setjmp.h>
#include <asm/debug.h>
#include <asm/interrupt.h>
+#include <asm/kexec_ranges.h>
/*
* The primary CPU waits a while for all secondary CPUs to enter. This is to
@@ -392,3 +395,195 @@ void default_machine_crash_shutdown(struct pt_regs *regs)
if (ppc_md.kexec_cpu_down)
ppc_md.kexec_cpu_down(1, 0);
}
+
+#ifdef CONFIG_CRASH_HOTPLUG
+#undef pr_fmt
+#define pr_fmt(fmt) "crash hp: " fmt
+
+/*
+ * Advertise preferred elfcorehdr size to userspace via
+ * /sys/kernel/crash_elfcorehdr_size sysfs interface.
+ */
+unsigned int arch_crash_get_elfcorehdr_size(void)
+{
+ unsigned long phdr_cnt;
+
+ /* A program header for possible CPUs + vmcoreinfo */
+ phdr_cnt = num_possible_cpus() + 1;
+ if (IS_ENABLED(CONFIG_MEMORY_HOTPLUG))
+ phdr_cnt += CONFIG_CRASH_MAX_MEMORY_RANGES;
+
+ return sizeof(struct elfhdr) + (phdr_cnt * sizeof(Elf64_Phdr));
+}
+
+/**
+ * update_crash_elfcorehdr() - Recreate the elfcorehdr and replace the old
+ * elfcorehdr in the kexec segment array.
+ * @image: the active struct kimage
+ * @mn: struct memory_notify data handler
+ */
+static void update_crash_elfcorehdr(struct kimage *image, struct memory_notify *mn)
+{
+ int ret;
+ struct crash_mem *cmem = NULL;
+ struct kexec_segment *ksegment;
+ void *ptr, *mem, *elfbuf = NULL;
+ unsigned long elfsz, memsz, base_addr, size;
+
+ ksegment = &image->segment[image->elfcorehdr_index];
+ mem = (void *) ksegment->mem;
+ memsz = ksegment->memsz;
+
+ ret = get_crash_memory_ranges(&cmem);
+ if (ret) {
+ pr_err("Failed to get crash mem range\n");
+ return;
+ }
+
+ /*
+ * The hot unplugged memory is part of crash memory ranges,
+ * remove it here.
+ */
+ if (image->hp_action == KEXEC_CRASH_HP_REMOVE_MEMORY) {
+ base_addr = PFN_PHYS(mn->start_pfn);
+ size = mn->nr_pages * PAGE_SIZE;
+ ret = remove_mem_range(&cmem, base_addr, size);
+ if (ret) {
+ pr_err("Failed to remove hot-unplugged memory from crash memory ranges\n");
+ goto out;
+ }
+ }
+
+ ret = crash_prepare_elf64_headers(cmem, false, &elfbuf, &elfsz);
+ if (ret) {
+ pr_err("Failed to prepare elf header\n");
+ goto out;
+ }
+
+ /*
+ * It is unlikely that the kernel hits this because the elfcorehdr kexec
+ * segment (memsz) is built with additional space to accommodate a growing
+ * number of crash memory ranges while loading the kdump kernel. It is
+ * just to avoid any unforeseen case.
+ */
+ if (elfsz > memsz) {
+ pr_err("Updated crash elfcorehdr elfsz %lu > memsz %lu", elfsz, memsz);
+ goto out;
+ }
+
+ ptr = __va(mem);
+ if (ptr) {
+ /* Temporarily invalidate the crash image while it is replaced */
+ xchg(&kexec_crash_image, NULL);
+
+ /* Replace the old elfcorehdr with newly prepared elfcorehdr */
+ memcpy((void *)ptr, elfbuf, elfsz);
+
+ /* The crash image is now valid once again */
+ xchg(&kexec_crash_image, image);
+ }
+out:
+ kvfree(cmem);
+ kvfree(elfbuf);
+}
+
+/**
+ * get_fdt_index - Loop through the kexec segment array and find
+ * the index of the FDT segment.
+ * @image: a pointer to kexec_crash_image
+ *
+ * Returns the index of FDT segment in the kexec segment array
+ * if found; otherwise -1.
+ */
+static int get_fdt_index(struct kimage *image)
+{
+ void *ptr;
+ unsigned long mem;
+ int i, fdt_index = -1;
+
+ /* Find the FDT segment index in kexec segment array. */
+ for (i = 0; i < image->nr_segments; i++) {
+ mem = image->segment[i].mem;
+ ptr = __va(mem);
+
+ if (ptr && fdt_magic(ptr) == FDT_MAGIC) {
+ fdt_index = i;
+ break;
+ }
+ }
+
+ return fdt_index;
+}
+
+/**
+ * update_crash_fdt - updates the cpus node of the crash FDT.
+ *
+ * @image: a pointer to kexec_crash_image
+ */
+static void update_crash_fdt(struct kimage *image)
+{
+ void *fdt;
+ int fdt_index;
+
+ fdt_index = get_fdt_index(image);
+ if (fdt_index < 0) {
+ pr_err("Unable to locate FDT segment.\n");
+ return;
+ }
+
+ fdt = __va((void *)image->segment[fdt_index].mem);
+
+ /* Temporarily invalidate the crash image while it is replaced */
+ xchg(&kexec_crash_image, NULL);
+
+ /* update FDT to reflect changes in CPU resources */
+ if (update_cpus_node(fdt))
+ pr_err("Failed to update crash FDT");
+
+ /* The crash image is now valid once again */
+ xchg(&kexec_crash_image, image);
+}
+
+int arch_crash_hotplug_support(struct kimage *image, unsigned long kexec_flags)
+{
+#ifdef CONFIG_KEXEC_FILE
+ if (image->file_mode)
+ return 1;
+#endif
+ return kexec_flags & KEXEC_CRASH_HOTPLUG_SUPPORT;
+}
+
+/**
+ * arch_crash_handle_hotplug_event - Handle crash CPU/Memory hotplug events to update the
+ * necessary kexec segments based on the hotplug event.
+ * @image: a pointer to kexec_crash_image
+ * @arg: struct memory_notify handler for memory hotplug case and NULL for CPU hotplug case.
+ *
+ * Update the kdump image based on the type of hotplug event, represented by image->hp_action.
+ * CPU add: Update the FDT segment to include the newly added CPU.
+ * CPU remove: No action is needed, with the assumption that it's okay to have offline CPUs
+ * part of the FDT.
+ * Memory add/remove: Recreate the elfcorehdr segment to reflect the updated memory map.
+ */
+void arch_crash_handle_hotplug_event(struct kimage *image, void *arg)
+{
+ struct memory_notify *mn;
+
+ switch (image->hp_action) {
+ case KEXEC_CRASH_HP_REMOVE_CPU:
+ return;
+
+ case KEXEC_CRASH_HP_ADD_CPU:
+ update_crash_fdt(image);
+ break;
+
+ case KEXEC_CRASH_HP_REMOVE_MEMORY:
+ case KEXEC_CRASH_HP_ADD_MEMORY:
+ mn = (struct memory_notify *)arg;
+ update_crash_elfcorehdr(image, mn);
+ return;
+ default:
+ pr_warn_once("Unknown hotplug action\n");
+ }
+}
+#endif /* CONFIG_CRASH_HOTPLUG */
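
arch_crash_get_elfcorehdr_size() budgets one ELF program header per possible CPU, one for vmcoreinfo and, with memory hotplug, CONFIG_CRASH_MAX_MEMORY_RANGES more, so the elfcorehdr segment never needs to grow at hotplug time. Here is a small user-space sketch of the same arithmetic; the CPU count and range limit are made-up stand-ins for num_possible_cpus() and the config option.

#include <stdio.h>
#include <elf.h>

int main(void)
{
	/* Illustrative stand-ins for num_possible_cpus() and
	 * CONFIG_CRASH_MAX_MEMORY_RANGES. */
	unsigned long possible_cpus = 2048;
	unsigned long max_mem_ranges = 8192;

	/* One Phdr per possible CPU + one for vmcoreinfo + memory ranges. */
	unsigned long phdr_cnt = possible_cpus + 1 + max_mem_ranges;
	unsigned long size = sizeof(Elf64_Ehdr) + phdr_cnt * sizeof(Elf64_Phdr);

	printf("elfcorehdr budget: %lu bytes (%lu program headers)\n",
	       size, phdr_cnt);
	return 0;
}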
diff --git a/arch/powerpc/kexec/elf_64.c b/arch/powerpc/kexec/elf_64.c
index 6d8951e8e966..5d6d616404cf 100644
--- a/arch/powerpc/kexec/elf_64.c
+++ b/arch/powerpc/kexec/elf_64.c
@@ -23,6 +23,7 @@
#include <linux/of_fdt.h>
#include <linux/slab.h>
#include <linux/types.h>
+#include <asm/kexec_ranges.h>
static void *elf64_load(struct kimage *image, char *kernel_buf,
unsigned long kernel_len, char *initrd,
@@ -36,6 +37,7 @@ static void *elf64_load(struct kimage *image, char *kernel_buf,
const void *slave_code;
struct elfhdr ehdr;
char *modified_cmdline = NULL;
+ struct crash_mem *rmem = NULL;
struct kexec_elf_info elf_info;
struct kexec_buf kbuf = { .image = image, .buf_min = 0,
.buf_max = ppc64_rma_size };
@@ -102,21 +104,25 @@ static void *elf64_load(struct kimage *image, char *kernel_buf,
kexec_dprintk("Loaded initrd at 0x%lx\n", initrd_load_addr);
}
+ ret = get_reserved_memory_ranges(&rmem);
+ if (ret)
+ goto out;
+
fdt = of_kexec_alloc_and_setup_fdt(image, initrd_load_addr,
initrd_len, cmdline,
- kexec_extra_fdt_size_ppc64(image));
+ kexec_extra_fdt_size_ppc64(image, rmem));
if (!fdt) {
pr_err("Error setting up the new device tree.\n");
ret = -EINVAL;
goto out;
}
- ret = setup_new_fdt_ppc64(image, fdt, initrd_load_addr,
- initrd_len, cmdline);
+ ret = setup_new_fdt_ppc64(image, fdt, rmem);
if (ret)
goto out_free_fdt;
- fdt_pack(fdt);
+ if (!IS_ENABLED(CONFIG_CRASH_HOTPLUG) || image->type != KEXEC_TYPE_CRASH)
+ fdt_pack(fdt);
kbuf.buffer = fdt;
kbuf.bufsz = kbuf.memsz = fdt_totalsize(fdt);
@@ -145,6 +151,7 @@ static void *elf64_load(struct kimage *image, char *kernel_buf,
out_free_fdt:
kvfree(fdt);
out:
+ kfree(rmem);
kfree(modified_cmdline);
kexec_free_elf_info(&elf_info);
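
The elf64_load() change above skips fdt_pack() for crash kernels when CONFIG_CRASH_HOTPLUG is enabled, so the blob keeps the slack budgeted by kexec_extra_fdt_size_ppc64() and later hotplug updates can add nodes in place. Below is a hedged userspace libfdt sketch of that trade-off; expand_fdt(), the extra-space argument and the do_pack flag are illustrative names, not kernel API.

#include <stdio.h>
#include <stdlib.h>
#include <libfdt.h>

/* Copy an FDT into a larger buffer (leaving slack for future edits),
 * then optionally trim it back down with fdt_pack(). */
static void *expand_fdt(const void *fdt_src, size_t extra, int do_pack)
{
	size_t bufsz = fdt_totalsize(fdt_src) + extra;
	void *fdt = malloc(bufsz);

	if (!fdt || fdt_open_into(fdt_src, fdt, bufsz) < 0) {
		free(fdt);
		return NULL;
	}

	/* A crash FDT that expects hotplug updates would skip this step. */
	if (do_pack)
		fdt_pack(fdt);

	printf("totalsize now %u bytes\n", fdt_totalsize(fdt));
	return fdt;
}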
diff --git a/arch/powerpc/kexec/file_load_64.c b/arch/powerpc/kexec/file_load_64.c
index 1bc65de6174f..e7ef8b2a2554 100644
--- a/arch/powerpc/kexec/file_load_64.c
+++ b/arch/powerpc/kexec/file_load_64.c
@@ -18,6 +18,7 @@
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
#include <linux/of.h>
+#include <linux/of_address.h>
#include <linux/memblock.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
@@ -30,6 +31,7 @@
#include <asm/iommu.h>
#include <asm/prom.h>
#include <asm/plpks.h>
+#include <asm/cputhreads.h>
struct umem_info {
__be64 *buf; /* data buffer for usable-memory property */
@@ -47,395 +49,22 @@ const struct kexec_file_ops * const kexec_file_loaders[] = {
NULL
};
-/**
- * get_exclude_memory_ranges - Get exclude memory ranges. This list includes
- * regions like opal/rtas, tce-table, initrd,
- * kernel, htab which should be avoided while
- * setting up kexec load segments.
- * @mem_ranges: Range list to add the memory ranges to.
- *
- * Returns 0 on success, negative errno on error.
- */
-static int get_exclude_memory_ranges(struct crash_mem **mem_ranges)
-{
- int ret;
-
- ret = add_tce_mem_ranges(mem_ranges);
- if (ret)
- goto out;
-
- ret = add_initrd_mem_range(mem_ranges);
- if (ret)
- goto out;
-
- ret = add_htab_mem_range(mem_ranges);
- if (ret)
- goto out;
-
- ret = add_kernel_mem_range(mem_ranges);
- if (ret)
- goto out;
-
- ret = add_rtas_mem_range(mem_ranges);
- if (ret)
- goto out;
-
- ret = add_opal_mem_range(mem_ranges);
- if (ret)
- goto out;
-
- ret = add_reserved_mem_ranges(mem_ranges);
- if (ret)
- goto out;
-
- /* exclude memory ranges should be sorted for easy lookup */
- sort_memory_ranges(*mem_ranges, true);
-out:
- if (ret)
- pr_err("Failed to setup exclude memory ranges\n");
- return ret;
-}
-
-/**
- * get_reserved_memory_ranges - Get reserve memory ranges. This list includes
- * memory regions that should be added to the
- * memory reserve map to ensure the region is
- * protected from any mischief.
- * @mem_ranges: Range list to add the memory ranges to.
- *
- * Returns 0 on success, negative errno on error.
- */
-static int get_reserved_memory_ranges(struct crash_mem **mem_ranges)
+int arch_check_excluded_range(struct kimage *image, unsigned long start,
+ unsigned long end)
{
- int ret;
-
- ret = add_rtas_mem_range(mem_ranges);
- if (ret)
- goto out;
-
- ret = add_tce_mem_ranges(mem_ranges);
- if (ret)
- goto out;
-
- ret = add_reserved_mem_ranges(mem_ranges);
-out:
- if (ret)
- pr_err("Failed to setup reserved memory ranges\n");
- return ret;
-}
-
-/**
- * __locate_mem_hole_top_down - Looks top down for a large enough memory hole
- * in the memory regions between buf_min & buf_max
- * for the buffer. If found, sets kbuf->mem.
- * @kbuf: Buffer contents and memory parameters.
- * @buf_min: Minimum address for the buffer.
- * @buf_max: Maximum address for the buffer.
- *
- * Returns 0 on success, negative errno on error.
- */
-static int __locate_mem_hole_top_down(struct kexec_buf *kbuf,
- u64 buf_min, u64 buf_max)
-{
- int ret = -EADDRNOTAVAIL;
- phys_addr_t start, end;
- u64 i;
-
- for_each_mem_range_rev(i, &start, &end) {
- /*
- * memblock uses [start, end) convention while it is
- * [start, end] here. Fix the off-by-one to have the
- * same convention.
- */
- end -= 1;
-
- if (start > buf_max)
- continue;
-
- /* Memory hole not found */
- if (end < buf_min)
- break;
-
- /* Adjust memory region based on the given range */
- if (start < buf_min)
- start = buf_min;
- if (end > buf_max)
- end = buf_max;
-
- start = ALIGN(start, kbuf->buf_align);
- if (start < end && (end - start + 1) >= kbuf->memsz) {
- /* Suitable memory range found. Set kbuf->mem */
- kbuf->mem = ALIGN_DOWN(end - kbuf->memsz + 1,
- kbuf->buf_align);
- ret = 0;
- break;
- }
- }
-
- return ret;
-}
-
-/**
- * locate_mem_hole_top_down_ppc64 - Skip special memory regions to find a
- * suitable buffer with top down approach.
- * @kbuf: Buffer contents and memory parameters.
- * @buf_min: Minimum address for the buffer.
- * @buf_max: Maximum address for the buffer.
- * @emem: Exclude memory ranges.
- *
- * Returns 0 on success, negative errno on error.
- */
-static int locate_mem_hole_top_down_ppc64(struct kexec_buf *kbuf,
- u64 buf_min, u64 buf_max,
- const struct crash_mem *emem)
-{
- int i, ret = 0, err = -EADDRNOTAVAIL;
- u64 start, end, tmin, tmax;
-
- tmax = buf_max;
- for (i = (emem->nr_ranges - 1); i >= 0; i--) {
- start = emem->ranges[i].start;
- end = emem->ranges[i].end;
-
- if (start > tmax)
- continue;
-
- if (end < tmax) {
- tmin = (end < buf_min ? buf_min : end + 1);
- ret = __locate_mem_hole_top_down(kbuf, tmin, tmax);
- if (!ret)
- return 0;
- }
-
- tmax = start - 1;
-
- if (tmax < buf_min) {
- ret = err;
- break;
- }
- ret = 0;
- }
-
- if (!ret) {
- tmin = buf_min;
- ret = __locate_mem_hole_top_down(kbuf, tmin, tmax);
- }
- return ret;
-}
-
-/**
- * __locate_mem_hole_bottom_up - Looks bottom up for a large enough memory hole
- * in the memory regions between buf_min & buf_max
- * for the buffer. If found, sets kbuf->mem.
- * @kbuf: Buffer contents and memory parameters.
- * @buf_min: Minimum address for the buffer.
- * @buf_max: Maximum address for the buffer.
- *
- * Returns 0 on success, negative errno on error.
- */
-static int __locate_mem_hole_bottom_up(struct kexec_buf *kbuf,
- u64 buf_min, u64 buf_max)
-{
- int ret = -EADDRNOTAVAIL;
- phys_addr_t start, end;
- u64 i;
-
- for_each_mem_range(i, &start, &end) {
- /*
- * memblock uses [start, end) convention while it is
- * [start, end] here. Fix the off-by-one to have the
- * same convention.
- */
- end -= 1;
-
- if (end < buf_min)
- continue;
-
- /* Memory hole not found */
- if (start > buf_max)
- break;
-
- /* Adjust memory region based on the given range */
- if (start < buf_min)
- start = buf_min;
- if (end > buf_max)
- end = buf_max;
-
- start = ALIGN(start, kbuf->buf_align);
- if (start < end && (end - start + 1) >= kbuf->memsz) {
- /* Suitable memory range found. Set kbuf->mem */
- kbuf->mem = start;
- ret = 0;
- break;
- }
- }
-
- return ret;
-}
-
-/**
- * locate_mem_hole_bottom_up_ppc64 - Skip special memory regions to find a
- * suitable buffer with bottom up approach.
- * @kbuf: Buffer contents and memory parameters.
- * @buf_min: Minimum address for the buffer.
- * @buf_max: Maximum address for the buffer.
- * @emem: Exclude memory ranges.
- *
- * Returns 0 on success, negative errno on error.
- */
-static int locate_mem_hole_bottom_up_ppc64(struct kexec_buf *kbuf,
- u64 buf_min, u64 buf_max,
- const struct crash_mem *emem)
-{
- int i, ret = 0, err = -EADDRNOTAVAIL;
- u64 start, end, tmin, tmax;
-
- tmin = buf_min;
- for (i = 0; i < emem->nr_ranges; i++) {
- start = emem->ranges[i].start;
- end = emem->ranges[i].end;
-
- if (end < tmin)
- continue;
-
- if (start > tmin) {
- tmax = (start > buf_max ? buf_max : start - 1);
- ret = __locate_mem_hole_bottom_up(kbuf, tmin, tmax);
- if (!ret)
- return 0;
- }
+ struct crash_mem *emem;
+ int i;
- tmin = end + 1;
+ emem = image->arch.exclude_ranges;
+ for (i = 0; i < emem->nr_ranges; i++)
+ if (start < emem->ranges[i].end && end > emem->ranges[i].start)
+ return 1;
- if (tmin > buf_max) {
- ret = err;
- break;
- }
- ret = 0;
- }
-
- if (!ret) {
- tmax = buf_max;
- ret = __locate_mem_hole_bottom_up(kbuf, tmin, tmax);
- }
- return ret;
+ return 0;
}
#ifdef CONFIG_CRASH_DUMP
/**
- * get_usable_memory_ranges - Get usable memory ranges. This list includes
- * regions like crashkernel, opal/rtas & tce-table,
- * that kdump kernel could use.
- * @mem_ranges: Range list to add the memory ranges to.
- *
- * Returns 0 on success, negative errno on error.
- */
-static int get_usable_memory_ranges(struct crash_mem **mem_ranges)
-{
- int ret;
-
- /*
- * Early boot failure observed on guests when low memory (first memory
- * block?) is not added to usable memory. So, add [0, crashk_res.end]
- * instead of [crashk_res.start, crashk_res.end] to workaround it.
- * Also, crashed kernel's memory must be added to reserve map to
- * avoid kdump kernel from using it.
- */
- ret = add_mem_range(mem_ranges, 0, crashk_res.end + 1);
- if (ret)
- goto out;
-
- ret = add_rtas_mem_range(mem_ranges);
- if (ret)
- goto out;
-
- ret = add_opal_mem_range(mem_ranges);
- if (ret)
- goto out;
-
- ret = add_tce_mem_ranges(mem_ranges);
-out:
- if (ret)
- pr_err("Failed to setup usable memory ranges\n");
- return ret;
-}
-
-/**
- * get_crash_memory_ranges - Get crash memory ranges. This list includes
- * first/crashing kernel's memory regions that
- * would be exported via an elfcore.
- * @mem_ranges: Range list to add the memory ranges to.
- *
- * Returns 0 on success, negative errno on error.
- */
-static int get_crash_memory_ranges(struct crash_mem **mem_ranges)
-{
- phys_addr_t base, end;
- struct crash_mem *tmem;
- u64 i;
- int ret;
-
- for_each_mem_range(i, &base, &end) {
- u64 size = end - base;
-
- /* Skip backup memory region, which needs a separate entry */
- if (base == BACKUP_SRC_START) {
- if (size > BACKUP_SRC_SIZE) {
- base = BACKUP_SRC_END + 1;
- size -= BACKUP_SRC_SIZE;
- } else
- continue;
- }
-
- ret = add_mem_range(mem_ranges, base, size);
- if (ret)
- goto out;
-
- /* Try merging adjacent ranges before reallocation attempt */
- if ((*mem_ranges)->nr_ranges == (*mem_ranges)->max_nr_ranges)
- sort_memory_ranges(*mem_ranges, true);
- }
-
- /* Reallocate memory ranges if there is no space to split ranges */
- tmem = *mem_ranges;
- if (tmem && (tmem->nr_ranges == tmem->max_nr_ranges)) {
- tmem = realloc_mem_ranges(mem_ranges);
- if (!tmem)
- goto out;
- }
-
- /* Exclude crashkernel region */
- ret = crash_exclude_mem_range(tmem, crashk_res.start, crashk_res.end);
- if (ret)
- goto out;
-
- /*
- * FIXME: For now, stay in parity with kexec-tools but if RTAS/OPAL
- * regions are exported to save their context at the time of
- * crash, they should actually be backed up just like the
- * first 64K bytes of memory.
- */
- ret = add_rtas_mem_range(mem_ranges);
- if (ret)
- goto out;
-
- ret = add_opal_mem_range(mem_ranges);
- if (ret)
- goto out;
-
- /* create a separate program header for the backup region */
- ret = add_mem_range(mem_ranges, BACKUP_SRC_START, BACKUP_SRC_SIZE);
- if (ret)
- goto out;
-
- sort_memory_ranges(*mem_ranges, false);
-out:
- if (ret)
- pr_err("Failed to setup crash memory ranges\n");
- return ret;
-}
-
-/**
* check_realloc_usable_mem - Reallocate buffer if it can't accommodate entries
* @um_info: Usable memory buffer and ranges info.
* @cnt: No. of entries to accommodate.
@@ -565,11 +194,10 @@ static int kdump_setup_usable_lmb(struct drmem_lmb *lmb, const __be32 **usm,
static int add_usable_mem_property(void *fdt, struct device_node *dn,
struct umem_info *um_info)
{
- int n_mem_addr_cells, n_mem_size_cells, node;
+ int node;
char path[NODE_PATH_LEN];
- int i, len, ranges, ret;
- const __be32 *prop;
- u64 base, end;
+ int i, ret;
+ u64 base, size;
of_node_get(dn);
@@ -588,41 +216,30 @@ static int add_usable_mem_property(void *fdt, struct device_node *dn,
goto out;
}
- /* Get the address & size cells */
- n_mem_addr_cells = of_n_addr_cells(dn);
- n_mem_size_cells = of_n_size_cells(dn);
- kexec_dprintk("address cells: %d, size cells: %d\n", n_mem_addr_cells,
- n_mem_size_cells);
-
um_info->idx = 0;
if (!check_realloc_usable_mem(um_info, 2)) {
ret = -ENOMEM;
goto out;
}
- prop = of_get_property(dn, "reg", &len);
- if (!prop || len <= 0) {
- ret = 0;
- goto out;
- }
-
/*
* "reg" property represents sequence of (addr,size) tuples
* each representing a memory range.
*/
- ranges = (len >> 2) / (n_mem_addr_cells + n_mem_size_cells);
-
- for (i = 0; i < ranges; i++) {
- base = of_read_number(prop, n_mem_addr_cells);
- prop += n_mem_addr_cells;
- end = base + of_read_number(prop, n_mem_size_cells) - 1;
- prop += n_mem_size_cells;
+ for (i = 0; ; i++) {
+ ret = of_property_read_reg(dn, i, &base, &size);
+ if (ret)
+ break;
- ret = add_usable_mem(um_info, base, end);
+ ret = add_usable_mem(um_info, base, base + size - 1);
if (ret)
goto out;
}
+ // No reg or empty reg? Skip this node.
+ if (i == 0)
+ goto out;
+
/*
* No kdump kernel usable memory found in this memory node.
* Write (0,0) tuple in linux,usable-memory property for
@@ -784,6 +401,23 @@ static void update_backup_region_phdr(struct kimage *image, Elf64_Ehdr *ehdr)
}
}
+static unsigned int kdump_extra_elfcorehdr_size(struct crash_mem *cmem)
+{
+#if defined(CONFIG_CRASH_HOTPLUG) && defined(CONFIG_MEMORY_HOTPLUG)
+ unsigned int extra_sz = 0;
+
+ if (CONFIG_CRASH_MAX_MEMORY_RANGES > (unsigned int)PN_XNUM)
+ pr_warn("Number of Phdrs %u exceeds max\n", CONFIG_CRASH_MAX_MEMORY_RANGES);
+ else if (cmem->nr_ranges >= CONFIG_CRASH_MAX_MEMORY_RANGES)
+ pr_warn("Configured crash mem ranges may not be enough\n");
+ else
+ extra_sz = (CONFIG_CRASH_MAX_MEMORY_RANGES - cmem->nr_ranges) * sizeof(Elf64_Phdr);
+
+ return extra_sz;
+#endif
+ return 0;
+}
+
/**
* load_elfcorehdr_segment - Setup crash memory ranges and initialize elfcorehdr
* segment needed to load kdump kernel.
@@ -815,7 +449,8 @@ static int load_elfcorehdr_segment(struct kimage *image, struct kexec_buf *kbuf)
kbuf->buffer = headers;
kbuf->mem = KEXEC_BUF_MEM_UNKNOWN;
- kbuf->bufsz = kbuf->memsz = headers_sz;
+ kbuf->bufsz = headers_sz;
+ kbuf->memsz = headers_sz + kdump_extra_elfcorehdr_size(cmem);
kbuf->top_down = false;
ret = kexec_add_buffer(kbuf);
@@ -918,13 +553,18 @@ int setup_purgatory_ppc64(struct kimage *image, const void *slave_code,
if (dn) {
u64 val;
- of_property_read_u64(dn, "opal-base-address", &val);
+ ret = of_property_read_u64(dn, "opal-base-address", &val);
+ if (ret)
+ goto out;
+
ret = kexec_purgatory_get_set_symbol(image, "opal_base", &val,
sizeof(val), false);
if (ret)
goto out;
- of_property_read_u64(dn, "opal-entry-address", &val);
+ ret = of_property_read_u64(dn, "opal-entry-address", &val);
+ if (ret)
+ goto out;
ret = kexec_purgatory_get_set_symbol(image, "opal_entry", &val,
sizeof(val), false);
}
@@ -974,11 +614,13 @@ static unsigned int cpu_node_size(void)
return size;
}
-static unsigned int kdump_extra_fdt_size_ppc64(struct kimage *image)
+static unsigned int kdump_extra_fdt_size_ppc64(struct kimage *image, unsigned int cpu_nodes)
{
- unsigned int cpu_nodes, extra_size = 0;
- struct device_node *dn;
+ unsigned int extra_size = 0;
u64 usm_entries;
+#ifdef CONFIG_CRASH_HOTPLUG
+ unsigned int possible_cpu_nodes;
+#endif
if (!IS_ENABLED(CONFIG_CRASH_DUMP) || image->type != KEXEC_TYPE_CRASH)
return 0;
@@ -994,17 +636,18 @@ static unsigned int kdump_extra_fdt_size_ppc64(struct kimage *image)
extra_size += (unsigned int)(usm_entries * sizeof(u64));
}
+#ifdef CONFIG_CRASH_HOTPLUG
/*
- * Get the number of CPU nodes in the current DT. This allows to
- * reserve places for CPU nodes added since the boot time.
+ * Make sure enough space is reserved to accommodate possible CPU nodes
+ * in the crash FDT. This allows packing possible CPU nodes which are
+ * not yet present in the system without regenerating the entire FDT.
*/
- cpu_nodes = 0;
- for_each_node_by_type(dn, "cpu") {
- cpu_nodes++;
+ if (image->type == KEXEC_TYPE_CRASH) {
+ possible_cpu_nodes = num_possible_cpus() / threads_per_core;
+ if (possible_cpu_nodes > cpu_nodes)
+ extra_size += (possible_cpu_nodes - cpu_nodes) * cpu_node_size();
}
-
- if (cpu_nodes > boot_cpu_node_count)
- extra_size += (cpu_nodes - boot_cpu_node_count) * cpu_node_size();
+#endif
return extra_size;
}
@@ -1016,103 +659,30 @@ static unsigned int kdump_extra_fdt_size_ppc64(struct kimage *image)
*
* Returns the estimated extra size needed for kexec/kdump kernel FDT.
*/
-unsigned int kexec_extra_fdt_size_ppc64(struct kimage *image)
+unsigned int kexec_extra_fdt_size_ppc64(struct kimage *image, struct crash_mem *rmem)
{
- unsigned int extra_size = 0;
+ struct device_node *dn;
+ unsigned int cpu_nodes = 0, extra_size = 0;
// Budget some space for the password blob. There's already extra space
// for the key name
if (plpks_is_available())
extra_size += (unsigned int)plpks_get_passwordlen();
- return extra_size + kdump_extra_fdt_size_ppc64(image);
-}
-
-/**
- * add_node_props - Reads node properties from device node structure and add
- * them to fdt.
- * @fdt: Flattened device tree of the kernel
- * @node_offset: offset of the node to add a property at
- * @dn: device node pointer
- *
- * Returns 0 on success, negative errno on error.
- */
-static int add_node_props(void *fdt, int node_offset, const struct device_node *dn)
-{
- int ret = 0;
- struct property *pp;
-
- if (!dn)
- return -EINVAL;
-
- for_each_property_of_node(dn, pp) {
- ret = fdt_setprop(fdt, node_offset, pp->name, pp->value, pp->length);
- if (ret < 0) {
- pr_err("Unable to add %s property: %s\n", pp->name, fdt_strerror(ret));
- return ret;
- }
- }
- return ret;
-}
-
-/**
- * update_cpus_node - Update cpus node of flattened device tree using of_root
- * device node.
- * @fdt: Flattened device tree of the kernel.
- *
- * Returns 0 on success, negative errno on error.
- */
-static int update_cpus_node(void *fdt)
-{
- struct device_node *cpus_node, *dn;
- int cpus_offset, cpus_subnode_offset, ret = 0;
-
- cpus_offset = fdt_path_offset(fdt, "/cpus");
- if (cpus_offset < 0 && cpus_offset != -FDT_ERR_NOTFOUND) {
- pr_err("Malformed device tree: error reading /cpus node: %s\n",
- fdt_strerror(cpus_offset));
- return cpus_offset;
- }
-
- if (cpus_offset > 0) {
- ret = fdt_del_node(fdt, cpus_offset);
- if (ret < 0) {
- pr_err("Error deleting /cpus node: %s\n", fdt_strerror(ret));
- return -EINVAL;
- }
- }
-
- /* Add cpus node to fdt */
- cpus_offset = fdt_add_subnode(fdt, fdt_path_offset(fdt, "/"), "cpus");
- if (cpus_offset < 0) {
- pr_err("Error creating /cpus node: %s\n", fdt_strerror(cpus_offset));
- return -EINVAL;
+ /* Get the number of CPU nodes in the current device tree */
+ for_each_node_by_type(dn, "cpu") {
+ cpu_nodes++;
}
- /* Add cpus node properties */
- cpus_node = of_find_node_by_path("/cpus");
- ret = add_node_props(fdt, cpus_offset, cpus_node);
- of_node_put(cpus_node);
- if (ret < 0)
- return ret;
+ /* Consider extra space for CPU nodes added since the boot time */
+ if (cpu_nodes > boot_cpu_node_count)
+ extra_size += (cpu_nodes - boot_cpu_node_count) * cpu_node_size();
- /* Loop through all subnodes of cpus and add them to fdt */
- for_each_node_by_type(dn, "cpu") {
- cpus_subnode_offset = fdt_add_subnode(fdt, cpus_offset, dn->full_name);
- if (cpus_subnode_offset < 0) {
- pr_err("Unable to add %s subnode: %s\n", dn->full_name,
- fdt_strerror(cpus_subnode_offset));
- ret = cpus_subnode_offset;
- goto out;
- }
+ /* Consider extra space for reserved memory ranges if any */
+ if (rmem->nr_ranges > 0)
+ extra_size += sizeof(struct fdt_reserve_entry) * rmem->nr_ranges;
- ret = add_node_props(fdt, cpus_subnode_offset, dn);
- if (ret < 0)
- goto out;
- }
-out:
- of_node_put(dn);
- return ret;
+ return extra_size + kdump_extra_fdt_size_ppc64(image, cpu_nodes);
}
static int copy_property(void *fdt, int node_offset, const struct device_node *dn,
@@ -1166,18 +736,13 @@ static int update_pci_dma_nodes(void *fdt, const char *dmapropname)
* being loaded.
* @image: kexec image being loaded.
* @fdt: Flattened device tree for the next kernel.
- * @initrd_load_addr: Address where the next initrd will be loaded.
- * @initrd_len: Size of the next initrd, or 0 if there will be none.
- * @cmdline: Command line for the next kernel, or NULL if there will
- * be none.
+ * @rmem: Reserved memory ranges.
*
* Returns 0 on success, negative errno on error.
*/
-int setup_new_fdt_ppc64(const struct kimage *image, void *fdt,
- unsigned long initrd_load_addr,
- unsigned long initrd_len, const char *cmdline)
+int setup_new_fdt_ppc64(const struct kimage *image, void *fdt, struct crash_mem *rmem)
{
- struct crash_mem *umem = NULL, *rmem = NULL;
+ struct crash_mem *umem = NULL;
int i, nr_ranges, ret;
#ifdef CONFIG_CRASH_DUMP
@@ -1233,10 +798,6 @@ int setup_new_fdt_ppc64(const struct kimage *image, void *fdt,
goto out;
/* Update memory reserve map */
- ret = get_reserved_memory_ranges(&rmem);
- if (ret)
- goto out;
-
nr_ranges = rmem ? rmem->nr_ranges : 0;
for (i = 0; i < nr_ranges; i++) {
u64 base, size;
@@ -1256,70 +817,11 @@ int setup_new_fdt_ppc64(const struct kimage *image, void *fdt,
ret = plpks_populate_fdt(fdt);
out:
- kfree(rmem);
kfree(umem);
return ret;
}
/**
- * arch_kexec_locate_mem_hole - Skip special memory regions like rtas, opal,
- * tce-table, reserved-ranges & such (exclude
- * memory ranges) as they can't be used for kexec
- * segment buffer. Sets kbuf->mem when a suitable
- * memory hole is found.
- * @kbuf: Buffer contents and memory parameters.
- *
- * Assumes minimum of PAGE_SIZE alignment for kbuf->memsz & kbuf->buf_align.
- *
- * Returns 0 on success, negative errno on error.
- */
-int arch_kexec_locate_mem_hole(struct kexec_buf *kbuf)
-{
- struct crash_mem **emem;
- u64 buf_min, buf_max;
- int ret;
-
- /* Look up the exclude ranges list while locating the memory hole */
- emem = &(kbuf->image->arch.exclude_ranges);
- if (!(*emem) || ((*emem)->nr_ranges == 0)) {
- pr_warn("No exclude range list. Using the default locate mem hole method\n");
- return kexec_locate_mem_hole(kbuf);
- }
-
- buf_min = kbuf->buf_min;
- buf_max = kbuf->buf_max;
- /* Segments for kdump kernel should be within crashkernel region */
- if (IS_ENABLED(CONFIG_CRASH_DUMP) && kbuf->image->type == KEXEC_TYPE_CRASH) {
- buf_min = (buf_min < crashk_res.start ?
- crashk_res.start : buf_min);
- buf_max = (buf_max > crashk_res.end ?
- crashk_res.end : buf_max);
- }
-
- if (buf_min > buf_max) {
- pr_err("Invalid buffer min and/or max values\n");
- return -EINVAL;
- }
-
- if (kbuf->top_down)
- ret = locate_mem_hole_top_down_ppc64(kbuf, buf_min, buf_max,
- *emem);
- else
- ret = locate_mem_hole_bottom_up_ppc64(kbuf, buf_min, buf_max,
- *emem);
-
- /* Add the buffer allocated to the exclude list for the next lookup */
- if (!ret) {
- add_mem_range(emem, kbuf->mem, kbuf->memsz);
- sort_memory_ranges(*emem, true);
- } else {
- pr_err("Failed to locate memory buffer of size %lu\n",
- kbuf->memsz);
- }
- return ret;
-}
-
-/**
* arch_kexec_kernel_image_probe - Does additional handling needed to setup
* kexec segments.
* @image: kexec image being loaded.
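
With the ppc64-specific top-down/bottom-up hole finders gone, the generic locator is steered by arch_check_excluded_range(), which rejects any candidate buffer that intersects one of the pre-built exclude ranges (RTAS, OPAL, TCE tables, initrd, kernel, htab, reserved ranges). Here is a minimal sketch of that intersection test over a plain array, treating ranges as half-open for simplicity; the sample ranges are made up.

#include <stdio.h>
#include <stdint.h>

struct range { uint64_t start, end; };

/* Reject a candidate [start, end) that intersects any excluded range,
 * mirroring the arch_check_excluded_range() test above. */
static int overlaps_excluded(const struct range *excl, int n,
			     uint64_t start, uint64_t end)
{
	for (int i = 0; i < n; i++)
		if (start < excl[i].end && end > excl[i].start)
			return 1;
	return 0;
}

int main(void)
{
	/* Illustrative exclude list (e.g. kernel text, an initrd). */
	const struct range excl[] = {
		{ 0x00000000, 0x02000000 },	/* first 32M */
		{ 0x40000000, 0x40800000 },	/* 8M initrd */
	};

	printf("%d\n", overlaps_excluded(excl, 2, 0x01000000, 0x03000000)); /* 1 */
	printf("%d\n", overlaps_excluded(excl, 2, 0x10000000, 0x20000000)); /* 0 */
	return 0;
}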
diff --git a/arch/powerpc/kexec/ranges.c b/arch/powerpc/kexec/ranges.c
index 33b780049aaf..3702b0bdab14 100644
--- a/arch/powerpc/kexec/ranges.c
+++ b/arch/powerpc/kexec/ranges.c
@@ -20,9 +20,13 @@
#include <linux/kexec.h>
#include <linux/of.h>
#include <linux/slab.h>
+#include <linux/memblock.h>
+#include <linux/crash_core.h>
#include <asm/sections.h>
#include <asm/kexec_ranges.h>
+#include <asm/crashdump-ppc64.h>
+#if defined(CONFIG_KEXEC_FILE) || defined(CONFIG_CRASH_DUMP)
/**
* get_max_nr_ranges - Get the max no. of ranges crash_mem structure
* could hold, given the size allocated for it.
@@ -234,13 +238,16 @@ int add_mem_range(struct crash_mem **mem_ranges, u64 base, u64 size)
return __add_mem_range(mem_ranges, base, size);
}
+#endif /* CONFIG_KEXEC_FILE || CONFIG_CRASH_DUMP */
+
+#ifdef CONFIG_KEXEC_FILE
/**
* add_tce_mem_ranges - Adds tce-table range to the given memory ranges list.
* @mem_ranges: Range list to add the memory range(s) to.
*
* Returns 0 on success, negative errno on error.
*/
-int add_tce_mem_ranges(struct crash_mem **mem_ranges)
+static int add_tce_mem_ranges(struct crash_mem **mem_ranges)
{
struct device_node *dn = NULL;
int ret = 0;
@@ -279,7 +286,7 @@ int add_tce_mem_ranges(struct crash_mem **mem_ranges)
*
* Returns 0 on success, negative errno on error.
*/
-int add_initrd_mem_range(struct crash_mem **mem_ranges)
+static int add_initrd_mem_range(struct crash_mem **mem_ranges)
{
u64 base, end;
int ret;
@@ -296,7 +303,6 @@ int add_initrd_mem_range(struct crash_mem **mem_ranges)
return ret;
}
-#ifdef CONFIG_PPC_64S_HASH_MMU
/**
* add_htab_mem_range - Adds htab range to the given memory ranges list,
* if it exists
@@ -304,14 +310,18 @@ int add_initrd_mem_range(struct crash_mem **mem_ranges)
*
* Returns 0 on success, negative errno on error.
*/
-int add_htab_mem_range(struct crash_mem **mem_ranges)
+static int add_htab_mem_range(struct crash_mem **mem_ranges)
{
+
+#ifdef CONFIG_PPC_64S_HASH_MMU
if (!htab_address)
return 0;
return add_mem_range(mem_ranges, __pa(htab_address), htab_size_bytes);
-}
+#else
+ return 0;
#endif
+}
/**
* add_kernel_mem_range - Adds kernel text region to the given
@@ -320,18 +330,20 @@ int add_htab_mem_range(struct crash_mem **mem_ranges)
*
* Returns 0 on success, negative errno on error.
*/
-int add_kernel_mem_range(struct crash_mem **mem_ranges)
+static int add_kernel_mem_range(struct crash_mem **mem_ranges)
{
return add_mem_range(mem_ranges, 0, __pa(_end));
}
+#endif /* CONFIG_KEXEC_FILE */
+#if defined(CONFIG_KEXEC_FILE) || defined(CONFIG_CRASH_DUMP)
/**
* add_rtas_mem_range - Adds RTAS region to the given memory ranges list.
* @mem_ranges: Range list to add the memory range to.
*
* Returns 0 on success, negative errno on error.
*/
-int add_rtas_mem_range(struct crash_mem **mem_ranges)
+static int add_rtas_mem_range(struct crash_mem **mem_ranges)
{
struct device_node *dn;
u32 base, size;
@@ -356,7 +368,7 @@ int add_rtas_mem_range(struct crash_mem **mem_ranges)
*
* Returns 0 on success, negative errno on error.
*/
-int add_opal_mem_range(struct crash_mem **mem_ranges)
+static int add_opal_mem_range(struct crash_mem **mem_ranges)
{
struct device_node *dn;
u64 base, size;
@@ -374,7 +386,9 @@ int add_opal_mem_range(struct crash_mem **mem_ranges)
of_node_put(dn);
return ret;
}
+#endif /* CONFIG_KEXEC_FILE || CONFIG_CRASH_DUMP */
+#ifdef CONFIG_KEXEC_FILE
/**
* add_reserved_mem_ranges - Adds "/reserved-ranges" regions exported by f/w
* to the given memory ranges list.
@@ -382,7 +396,7 @@ int add_opal_mem_range(struct crash_mem **mem_ranges)
*
* Returns 0 on success, negative errno on error.
*/
-int add_reserved_mem_ranges(struct crash_mem **mem_ranges)
+static int add_reserved_mem_ranges(struct crash_mem **mem_ranges)
{
int n_mem_addr_cells, n_mem_size_cells, i, len, cells, ret = 0;
struct device_node *root = of_find_node_by_path("/");
@@ -412,3 +426,283 @@ int add_reserved_mem_ranges(struct crash_mem **mem_ranges)
return ret;
}
+
+/**
+ * get_reserved_memory_ranges - Get reserve memory ranges. This list includes
+ * memory regions that should be added to the
+ * memory reserve map to ensure the region is
+ * protected from any mischief.
+ * @mem_ranges: Range list to add the memory ranges to.
+ *
+ * Returns 0 on success, negative errno on error.
+ */
+int get_reserved_memory_ranges(struct crash_mem **mem_ranges)
+{
+ int ret;
+
+ ret = add_rtas_mem_range(mem_ranges);
+ if (ret)
+ goto out;
+
+ ret = add_tce_mem_ranges(mem_ranges);
+ if (ret)
+ goto out;
+
+ ret = add_reserved_mem_ranges(mem_ranges);
+out:
+ if (ret)
+ pr_err("Failed to setup reserved memory ranges\n");
+ return ret;
+}
+
+/**
+ * get_exclude_memory_ranges - Get exclude memory ranges. This list includes
+ * regions like opal/rtas, tce-table, initrd,
+ * kernel, htab which should be avoided while
+ * setting up kexec load segments.
+ * @mem_ranges: Range list to add the memory ranges to.
+ *
+ * Returns 0 on success, negative errno on error.
+ */
+int get_exclude_memory_ranges(struct crash_mem **mem_ranges)
+{
+ int ret;
+
+ ret = add_tce_mem_ranges(mem_ranges);
+ if (ret)
+ goto out;
+
+ ret = add_initrd_mem_range(mem_ranges);
+ if (ret)
+ goto out;
+
+ ret = add_htab_mem_range(mem_ranges);
+ if (ret)
+ goto out;
+
+ ret = add_kernel_mem_range(mem_ranges);
+ if (ret)
+ goto out;
+
+ ret = add_rtas_mem_range(mem_ranges);
+ if (ret)
+ goto out;
+
+ ret = add_opal_mem_range(mem_ranges);
+ if (ret)
+ goto out;
+
+ ret = add_reserved_mem_ranges(mem_ranges);
+ if (ret)
+ goto out;
+
+ /* exclude memory ranges should be sorted for easy lookup */
+ sort_memory_ranges(*mem_ranges, true);
+out:
+ if (ret)
+ pr_err("Failed to setup exclude memory ranges\n");
+ return ret;
+}
+
+#ifdef CONFIG_CRASH_DUMP
+/**
+ * get_usable_memory_ranges - Get usable memory ranges. This list includes
+ * regions like crashkernel, opal/rtas & tce-table,
+ * that kdump kernel could use.
+ * @mem_ranges: Range list to add the memory ranges to.
+ *
+ * Returns 0 on success, negative errno on error.
+ */
+int get_usable_memory_ranges(struct crash_mem **mem_ranges)
+{
+ int ret;
+
+ /*
+ * Early boot failure observed on guests when low memory (first memory
+ * block?) is not added to usable memory. So, add [0, crashk_res.end]
+ * instead of [crashk_res.start, crashk_res.end] to work around it.
+ * Also, crashed kernel's memory must be added to reserve map to
+ * avoid kdump kernel from using it.
+ */
+ ret = add_mem_range(mem_ranges, 0, crashk_res.end + 1);
+ if (ret)
+ goto out;
+
+ ret = add_rtas_mem_range(mem_ranges);
+ if (ret)
+ goto out;
+
+ ret = add_opal_mem_range(mem_ranges);
+ if (ret)
+ goto out;
+
+ ret = add_tce_mem_ranges(mem_ranges);
+out:
+ if (ret)
+ pr_err("Failed to setup usable memory ranges\n");
+ return ret;
+}
+#endif /* CONFIG_CRASH_DUMP */
+#endif /* CONFIG_KEXEC_FILE */
+
+#ifdef CONFIG_CRASH_DUMP
+/**
+ * get_crash_memory_ranges - Get crash memory ranges. This list includes
+ * first/crashing kernel's memory regions that
+ * would be exported via an elfcore.
+ * @mem_ranges: Range list to add the memory ranges to.
+ *
+ * Returns 0 on success, negative errno on error.
+ */
+int get_crash_memory_ranges(struct crash_mem **mem_ranges)
+{
+ phys_addr_t base, end;
+ struct crash_mem *tmem;
+ u64 i;
+ int ret;
+
+ for_each_mem_range(i, &base, &end) {
+ u64 size = end - base;
+
+ /* Skip backup memory region, which needs a separate entry */
+ if (base == BACKUP_SRC_START) {
+ if (size > BACKUP_SRC_SIZE) {
+ base = BACKUP_SRC_END + 1;
+ size -= BACKUP_SRC_SIZE;
+ } else
+ continue;
+ }
+
+ ret = add_mem_range(mem_ranges, base, size);
+ if (ret)
+ goto out;
+
+ /* Try merging adjacent ranges before reallocation attempt */
+ if ((*mem_ranges)->nr_ranges == (*mem_ranges)->max_nr_ranges)
+ sort_memory_ranges(*mem_ranges, true);
+ }
+
+ /* Reallocate memory ranges if there is no space to split ranges */
+ tmem = *mem_ranges;
+ if (tmem && (tmem->nr_ranges == tmem->max_nr_ranges)) {
+ tmem = realloc_mem_ranges(mem_ranges);
+ if (!tmem)
+ goto out;
+ }
+
+ /* Exclude crashkernel region */
+ ret = crash_exclude_mem_range(tmem, crashk_res.start, crashk_res.end);
+ if (ret)
+ goto out;
+
+ /*
+ * FIXME: For now, stay in parity with kexec-tools but if RTAS/OPAL
+ * regions are exported to save their context at the time of
+ * crash, they should actually be backed up just like the
+ * first 64K bytes of memory.
+ */
+ ret = add_rtas_mem_range(mem_ranges);
+ if (ret)
+ goto out;
+
+ ret = add_opal_mem_range(mem_ranges);
+ if (ret)
+ goto out;
+
+ /* create a separate program header for the backup region */
+ ret = add_mem_range(mem_ranges, BACKUP_SRC_START, BACKUP_SRC_SIZE);
+ if (ret)
+ goto out;
+
+ sort_memory_ranges(*mem_ranges, false);
+out:
+ if (ret)
+ pr_err("Failed to setup crash memory ranges\n");
+ return ret;
+}
+
+/**
+ * remove_mem_range - Removes the given memory range from the range list.
+ * @mem_ranges: Range list to remove the memory range from.
+ * @base: Base address of the range to remove.
+ * @size: Size of the memory range to remove.
+ *
+ * (Re)allocates memory, if needed.
+ *
+ * Returns 0 on success, negative errno on error.
+ */
+int remove_mem_range(struct crash_mem **mem_ranges, u64 base, u64 size)
+{
+ u64 end;
+ int ret = 0;
+ unsigned int i;
+ u64 mstart, mend;
+ struct crash_mem *mem_rngs = *mem_ranges;
+
+ if (!size)
+ return 0;
+
+ /*
+ * Memory ranges are stored as start and end addresses; use
+ * the same format for the remove operation.
+ */
+ end = base + size - 1;
+
+ for (i = 0; i < mem_rngs->nr_ranges; i++) {
+ mstart = mem_rngs->ranges[i].start;
+ mend = mem_rngs->ranges[i].end;
+
+ /*
+ * Memory range to remove is not part of this range entry
+ * in the memory range list
+ */
+ if (!(base >= mstart && end <= mend))
+ continue;
+
+ /*
+ * Memory range to remove is equivalent to this entry in the
+ * memory range list. Remove the range entry from the list.
+ */
+ if (base == mstart && end == mend) {
+ for (; i < mem_rngs->nr_ranges - 1; i++) {
+ mem_rngs->ranges[i].start = mem_rngs->ranges[i+1].start;
+ mem_rngs->ranges[i].end = mem_rngs->ranges[i+1].end;
+ }
+ mem_rngs->nr_ranges--;
+ goto out;
+ }
+ /*
+ * Start address of the memory range to remove and the
+ * current memory range entry in the list is same. Just
+ * move the start address of the current memory range
+ * entry in the list to end + 1.
+ */
+ else if (base == mstart) {
+ mem_rngs->ranges[i].start = end + 1;
+ goto out;
+ }
+ /*
+ * End address of the memory range to remove and the
+ * current memory range entry in the list is same.
+ * Just move the end address of the current memory
+ * range entry in the list to base - 1.
+ */
+ else if (end == mend) {
+ mem_rngs->ranges[i].end = base - 1;
+ goto out;
+ }
+ /*
+ * Memory range to remove is not at the edge of current
+ * memory range entry. Split the current memory entry into
+ * two half.
+ */
+ else {
+ mem_rngs->ranges[i].end = base - 1;
+ size = mend - end;
+ ret = add_mem_range(mem_ranges, end + 1, size);
+ }
+ }
+out:
+ return ret;
+}
+#endif /* CONFIG_CRASH_DUMP */
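
remove_mem_range() above handles four cases when carving a region out of an existing entry: exact match (drop the entry), trim the front, trim the back, or split the entry in two. The sketch below performs the same case analysis in user space over a fixed-size array of inclusive [start, end] ranges; the array size and sample ranges are illustrative.

#include <stdio.h>
#include <stdint.h>

#define MAX_RANGES 8
struct range { uint64_t start, end; };	/* inclusive, like crash_mem */

static struct range rngs[MAX_RANGES] = { { 0x1000, 0x8fff } };
static int nr_ranges = 1;

static int remove_range(uint64_t base, uint64_t size)
{
	uint64_t end = base + size - 1;

	for (int i = 0; i < nr_ranges; i++) {
		uint64_t mstart = rngs[i].start, mend = rngs[i].end;

		if (!(base >= mstart && end <= mend))
			continue;			/* not inside this entry */

		if (base == mstart && end == mend) {	/* drop the entry */
			for (; i < nr_ranges - 1; i++)
				rngs[i] = rngs[i + 1];
			nr_ranges--;
		} else if (base == mstart) {		/* trim the front */
			rngs[i].start = end + 1;
		} else if (end == mend) {		/* trim the back */
			rngs[i].end = base - 1;
		} else {				/* split in two */
			if (nr_ranges == MAX_RANGES)
				return -1;
			rngs[i].end = base - 1;
			rngs[nr_ranges].start = end + 1;
			rngs[nr_ranges].end = mend;
			nr_ranges++;
		}
		return 0;
	}
	return 0;
}

int main(void)
{
	remove_range(0x3000, 0x1000);	/* splits into [0x1000,0x2fff] [0x4000,0x8fff] */
	for (int i = 0; i < nr_ranges; i++)
		printf("[0x%llx, 0x%llx]\n",
		       (unsigned long long)rngs[i].start,
		       (unsigned long long)rngs[i].end);
	return 0;
}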
diff --git a/arch/powerpc/kexec/relocate_32.S b/arch/powerpc/kexec/relocate_32.S
index 104c9911f406..dd86e338307d 100644
--- a/arch/powerpc/kexec/relocate_32.S
+++ b/arch/powerpc/kexec/relocate_32.S
@@ -348,16 +348,13 @@ write_utlb:
rlwinm r10, r24, 0, 22, 27
cmpwi r10, PPC47x_TLB0_4K
- bne 0f
li r10, 0x1000 /* r10 = 4k */
- ANNOTATE_INTRA_FUNCTION_CALL
- bl 1f
+ beq 0f
-0:
/* Defaults to 256M */
lis r10, 0x1000
- bcl 20,31,$+4
+0: bcl 20,31,$+4
1: mflr r4
addi r4, r4, (2f-1b) /* virtual address of 2f */