From: Kefeng Wang <wangkefeng.wang@huawei.com>

hulk inclusion
category: feature
bugzilla: 48159
CVE: N/A
------------------------------
It's better to move the non-upstreamed feature into a stand-alone file, which makes it easier to backport mainline patches.
No functional changes.
Cc: Sang Yan <sangyan@huawei.com>
Signed-off-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Reviewed-by: Sang Yan <sangyan@huawei.com>
Reviewed-by: Liu Shixin <liushixin2@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
---
 arch/arm64/kernel/setup.c                    | 10 +---
 arch/arm64/mm/Makefile                       |  1 +
 arch/arm64/mm/init.c                         | 43 +---------------
 arch/arm64/mm/{pmem_reserve.h => internal.h} | 12 +++++
 arch/arm64/mm/quick_kexec.c                  | 53 ++++++++++++++++++++
 5 files changed, 69 insertions(+), 50 deletions(-)
 rename arch/arm64/mm/{pmem_reserve.h => internal.h} (52%)
 create mode 100644 arch/arm64/mm/quick_kexec.c
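Note on the shape of the refactor: it follows the usual stub-header pattern, where the Makefile decides whether the feature object is built at all, and the shared internal header supplies no-op static inline stubs so call sites stay free of #ifdef. A minimal sketch of the pattern, with hypothetical names (CONFIG_FEATURE_X and feature_x_reserve are illustrative, not from this tree):

/* feature.h, the shared internal header (names are illustrative) */
#ifdef CONFIG_FEATURE_X
void feature_x_reserve(void);
#else
static inline void feature_x_reserve(void) {}	/* no-op when disabled */
#endif

/* caller.c: the call site needs no #ifdef either way */
#include "feature.h"

void boot_init(void)
{
	feature_x_reserve();
}

With the option disabled, the empty inline stub compiles away entirely, so the caller carries no runtime or size cost; this is how reserve_quick_kexec() and request_quick_kexec_res() are wired up in internal.h below.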
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c
index 08198b824846..2dd3ea837d35 100644
--- a/arch/arm64/kernel/setup.c
+++ b/arch/arm64/kernel/setup.c
@@ -53,7 +53,7 @@
 #include <asm/xen/hypervisor.h>
 #include <asm/mmu_context.h>
 
-#include "../mm/pmem_reserve.h"
+#include "../mm/internal.h"
 
 static int num_standard_resources;
 static struct resource *standard_resources;
@@ -291,13 +291,7 @@ static void __init request_standard_resources(void)
 			request_resource(res, &crashk_res);
 #endif
 
-#ifdef CONFIG_QUICK_KEXEC
-		if (quick_kexec_res.end &&
-		    quick_kexec_res.start >= res->start &&
-		    quick_kexec_res.end <= res->end)
-			request_resource(res, &quick_kexec_res);
-#endif
-
+		request_quick_kexec_res(res);
 		request_pin_mem_res(res);
 	}
 }
diff --git a/arch/arm64/mm/Makefile b/arch/arm64/mm/Makefile
index 3634ad81bdf1..68a32305cff9 100644
--- a/arch/arm64/mm/Makefile
+++ b/arch/arm64/mm/Makefile
@@ -17,3 +17,4 @@ obj-$(CONFIG_KASAN)	+= kasan_init.o
 KASAN_SANITIZE_kasan_init.o	:= n
 
 obj-$(CONFIG_ARM64_PMEM_RESERVE)	+= pmem_reserve.o
+obj-$(CONFIG_QUICK_KEXEC)	+= quick_kexec.o
diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c
index 5ab9dd7d55d9..90411356b8b2 100644
--- a/arch/arm64/mm/init.c
+++ b/arch/arm64/mm/init.c
@@ -45,7 +45,7 @@
 #include <asm/tlb.h>
 #include <asm/alternative.h>
 
-#include "pmem_reserve.h"
+#include "internal.h"
 
 /*
  * We need to be able to catch inadvertent references to memstart_addr
@@ -131,45 +131,6 @@ static void __init reserve_elfcorehdr(void)
 }
 #endif /* CONFIG_CRASH_DUMP */
 
-#ifdef CONFIG_QUICK_KEXEC
-static int __init parse_quick_kexec(char *p)
-{
-	if (!p)
-		return 0;
-
-	quick_kexec_res.end = PAGE_ALIGN(memparse(p, NULL));
-
-	return 0;
-}
-early_param("quickkexec", parse_quick_kexec);
-
-static void __init reserve_quick_kexec(void)
-{
-	unsigned long long mem_start, mem_len;
-
-	mem_len = quick_kexec_res.end;
-	if (mem_len == 0)
-		return;
-
-	/* Current arm64 boot protocol requires 2MB alignment */
-	mem_start = memblock_find_in_range(0, arm64_dma_phys_limit,
-					   mem_len, SZ_2M);
-	if (mem_start == 0) {
-		pr_warn("cannot allocate quick kexec mem (size:0x%llx)\n",
-			mem_len);
-		quick_kexec_res.end = 0;
-		return;
-	}
-
-	memblock_reserve(mem_start, mem_len);
-	pr_info("quick kexec mem reserved: 0x%016llx - 0x%016llx (%lld MB)\n",
-		mem_start, mem_start + mem_len, mem_len >> 20);
-
-	quick_kexec_res.start = mem_start;
-	quick_kexec_res.end = mem_start + mem_len - 1;
-}
-#endif
-
 /*
  * Return the maximum physical address for a zone accessible by the given bits
  * limit. If DRAM starts above 32-bit, expand the zone to the maximum
@@ -591,9 +552,7 @@ void __init bootmem_init(void)
 	 */
 	reserve_crashkernel();
 
-#ifdef CONFIG_QUICK_KEXEC
 	reserve_quick_kexec();
-#endif
 
 	reserve_pmem();
 
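A subtlety worth keeping in mind while reading the moved code: until reserve_quick_kexec() runs, quick_kexec_res.end holds a size rather than an end address; only after the memblock search succeeds does the resource describe a real [start, end] range. Sketch of the flow for a hypothetical boot parameter quickkexec=512M (values illustrative):

/*
 * parse_quick_kexec():   quick_kexec_res.end = PAGE_ALIGN(0x20000000);
 *                        (a size at this point, not yet an address)
 * reserve_quick_kexec(): mem_len   = quick_kexec_res.end;
 *                        mem_start = memblock_find_in_range(0, arm64_dma_phys_limit,
 *                                                           mem_len, SZ_2M);
 *                        quick_kexec_res.start = mem_start;
 *                        quick_kexec_res.end   = mem_start + mem_len - 1;
 */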
diff --git a/arch/arm64/mm/pmem_reserve.h b/arch/arm64/mm/internal.h
similarity index 52%
rename from arch/arm64/mm/pmem_reserve.h
rename to arch/arm64/mm/internal.h
index d143198c9696..e1c6fc36b3b5 100644
--- a/arch/arm64/mm/pmem_reserve.h
+++ b/arch/arm64/mm/internal.h
@@ -1,5 +1,8 @@
 /* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __ARM64_MM_INTERNAL_H
+#define __ARM64_MM_INTERNAL_H
+
 #include <linux/types.h>
 
 #ifdef CONFIG_ARM64_PMEM_RESERVE
 void __init setup_reserve_pmem(u64 start, u64 size);
@@ -11,3 +14,12 @@ static inline void __init setup_reserve_pmem(u64 start, u64 size) {}
 static inline void __init reserve_pmem(void) {}
 static inline void __init request_pmem_res_resource(void) {}
 #endif
+#ifdef CONFIG_QUICK_KEXEC
+void __init reserve_quick_kexec(void);
+void __init request_quick_kexec_res(struct resource *res);
+#else
+static inline void __init reserve_quick_kexec(void) {}
+static inline void __init request_quick_kexec_res(struct resource *res) {}
+#endif
+
+#endif /* __ARM64_MM_INTERNAL_H */
diff --git a/arch/arm64/mm/quick_kexec.c b/arch/arm64/mm/quick_kexec.c
new file mode 100644
index 000000000000..fb68346f45a9
--- /dev/null
+++ b/arch/arm64/mm/quick_kexec.c
@@ -0,0 +1,53 @@
+// SPDX-License-Identifier: GPL-2.0-only
+
+#define pr_fmt(fmt) "quick_kexec: " fmt
+
+#include <linux/memblock.h>
+#include <linux/ioport.h>
+#include <linux/types.h>
+#include <linux/kexec.h>
+
+static int __init parse_quick_kexec(char *p)
+{
+	if (!p)
+		return 0;
+
+	quick_kexec_res.end = PAGE_ALIGN(memparse(p, NULL));
+
+	return 0;
+}
+early_param("quickkexec", parse_quick_kexec);
+
+void __init reserve_quick_kexec(void)
+{
+	unsigned long long mem_start, mem_len;
+
+	mem_len = quick_kexec_res.end;
+	if (mem_len == 0)
+		return;
+
+	/* Current arm64 boot protocol requires 2MB alignment */
+	mem_start = memblock_find_in_range(0, arm64_dma_phys_limit,
+					   mem_len, SZ_2M);
+	if (mem_start == 0) {
+		pr_warn("cannot allocate quick kexec mem (size:0x%llx)\n",
+			mem_len);
+		quick_kexec_res.end = 0;
+		return;
+	}
+
+	memblock_reserve(mem_start, mem_len);
+	pr_info("quick kexec mem reserved: 0x%016llx - 0x%016llx (%lld MB)\n",
+		mem_start, mem_start + mem_len, mem_len >> 20);
+
+	quick_kexec_res.start = mem_start;
+	quick_kexec_res.end = mem_start + mem_len - 1;
+}
+
+void __init request_quick_kexec_res(struct resource *res)
+{
+	if (quick_kexec_res.end &&
+	    quick_kexec_res.start >= res->start &&
+	    quick_kexec_res.end <= res->end)
+		request_resource(res, &quick_kexec_res);
+}
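The new file uses quick_kexec_res and arm64_dma_phys_limit without defining them, so both must come from headers outside this diff (<linux/kexec.h> is already included above). In the quick kexec series the resource itself is presumably declared along these lines; this is an assumption about the surrounding tree, not something shown in this patch:

/* include/linux/kexec.h (assumed, not part of this diff) */
#ifdef CONFIG_QUICK_KEXEC
extern struct resource quick_kexec_res;
#endif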