From: Sang Yan <sangyan@huawei.com>
hulk inclusion
category: feature
bugzilla: 48159
CVE: N/A
------------------------------
Reserve memory for quick kexec on arm64. The size of the reserved region is
specified with the "quickkexec=" kernel command line parameter.
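For example (illustrative only, not part of this patch), 256M could be
reserved for quick kexec by appending the following to the kernel command
line; the size is parsed by memparse(), so K/M/G suffixes are accepted:

    quickkexec=256M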
Signed-off-by: Sang Yan <sangyan@huawei.com>
Reviewed-by: Chen Wandun <chenwandun@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
---
 arch/arm64/kernel/setup.c |  6 ++++++
 arch/arm64/mm/init.c      | 42 +++++++++++++++++++++++++++++++++++++++
 2 files changed, 48 insertions(+)
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index f89dbb3f9eea..b0fe635e943e 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -266,6 +266,12 @@ static void __init request_standard_resources(void)
 		if (crashk_res.end && crashk_res.start >= res->start &&
 		    crashk_res.end <= res->end)
 			request_resource(res, &crashk_res);
+#endif
+#ifdef CONFIG_QUICK_KEXEC
+		if (quick_kexec_res.end &&
+		    quick_kexec_res.start >= res->start &&
+		    quick_kexec_res.end <= res->end)
+			request_resource(res, &quick_kexec_res);
 #endif
 	}
 }
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index d5bcc6dffdee..a3edb18a22f1 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -128,6 +128,45 @@ static void __init reserve_elfcorehdr(void)
 }
 #endif /* CONFIG_CRASH_DUMP */
 
+#ifdef CONFIG_QUICK_KEXEC
+static int __init parse_quick_kexec(char *p)
+{
+	if (!p)
+		return 0;
+
+	quick_kexec_res.end = PAGE_ALIGN(memparse(p, NULL));
+
+	return 0;
+}
+early_param("quickkexec", parse_quick_kexec);
+
+static void __init reserve_quick_kexec(void)
+{
+	unsigned long long mem_start, mem_len;
+
+	mem_len = quick_kexec_res.end;
+	if (mem_len == 0)
+		return;
+
+	/* Current arm64 boot protocol requires 2MB alignment */
+	mem_start = memblock_find_in_range(0, arm64_dma_phys_limit,
+					   mem_len, SZ_2M);
+	if (mem_start == 0) {
+		pr_warn("cannot allocate quick kexec mem (size:0x%llx)\n",
+			mem_len);
+		quick_kexec_res.end = 0;
+		return;
+	}
+
+	memblock_reserve(mem_start, mem_len);
+	pr_info("quick kexec mem reserved: 0x%016llx - 0x%016llx (%lld MB)\n",
+		mem_start, mem_start + mem_len, mem_len >> 20);
+
+	quick_kexec_res.start = mem_start;
+	quick_kexec_res.end = mem_start + mem_len - 1;
+}
+#endif
+
 /*
  * Return the maximum physical address for a zone with a given address size
  * limit. It currently assumes that for memory starting above 4G, 32-bit
@@ -415,6 +454,9 @@ void __init bootmem_init(void)
 	 */
 	reserve_crashkernel();
 
+#ifdef CONFIG_QUICK_KEXEC
+	reserve_quick_kexec();
+#endif
 	memblock_dump_all();
 }
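Note: this patch assumes that quick_kexec_res is declared elsewhere in the
series (for example in include/linux/kexec.h, with its definition in the
kexec core code). A minimal sketch of what such a definition might look
like, modelled on crashk_res, is shown below; the name, flags and desc are
illustrative assumptions and are not taken from this patch:

#ifdef CONFIG_QUICK_KEXEC
/*
 * Illustrative sketch only; the actual definition lives elsewhere in the
 * series. Resource describing the memory reserved for the quick kexec
 * kernel, analogous to the "Crash kernel" resource (crashk_res).
 */
struct resource quick_kexec_res = {
	.name  = "Quick kexec",
	.start = 0,
	.end   = 0,
	.flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM,
	.desc  = IORES_DESC_RESERVED
};
#endif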