euleros inclusion category: feature bugzilla: NA issues: #I1RC8Z CVE: NA
In normal kexec, relocating the kernel may take 5–10 seconds to copy all segments from vmalloc'ed memory to kernel boot memory, because the MMU is disabled during the copy.
We introduce quick kexec to save the time spent copying memory as described above, just like kdump (kexec on crash), by using the reserved memory region "Quick Kexec".
We construct the quick kimage in the same way as the crash kernel image, then simply copy all segments of the kimage to the reserved memory.
We also add support for this in the kexec_load syscall via the new KEXEC_QUICK flag.
Signed-off-by: Sang Yan sangyan@huawei.com --- arch/Kconfig | 9 +++++++++ include/linux/ioport.h | 1 + include/linux/kexec.h | 11 ++++++++++- include/uapi/linux/kexec.h | 1 + kernel/kexec.c | 10 ++++++++++ kernel/kexec_core.c | 41 ++++++++++++++++++++++++++++++++--------- 6 files changed, 63 insertions(+), 10 deletions(-)
diff --git a/arch/Kconfig b/arch/Kconfig index d3d70369..11580292 100644 --- a/arch/Kconfig +++ b/arch/Kconfig @@ -18,6 +18,15 @@ config KEXEC_CORE select CRASH_CORE bool
+config QUICK_KEXEC + bool "Support for quick kexec" + depends on KEXEC_CORE + help + It uses pre-reserved memory to accelerate kexec, just like + crash kexec, loads new kernel and initrd to reserved memory, + and boots new kernel on that memory. It will save the time + of relocating kernel. + config HAVE_IMA_KEXEC bool
diff --git a/include/linux/ioport.h b/include/linux/ioport.h index 5330288..1a438ec 100644 --- a/include/linux/ioport.h +++ b/include/linux/ioport.h @@ -139,6 +139,7 @@ enum { IORES_DESC_PERSISTENT_MEMORY_LEGACY = 5, IORES_DESC_DEVICE_PRIVATE_MEMORY = 6, IORES_DESC_DEVICE_PUBLIC_MEMORY = 7, + IORES_DESC_QUICK_KEXEC = 8, };
/* helpers to define resources */ diff --git a/include/linux/kexec.h b/include/linux/kexec.h index d6b8d0a..98cf0fb 100644 --- a/include/linux/kexec.h +++ b/include/linux/kexec.h @@ -233,9 +233,10 @@ struct kimage { unsigned long control_page;
/* Flags to indicate special processing */ - unsigned int type : 1; + unsigned int type : 2; #define KEXEC_TYPE_DEFAULT 0 #define KEXEC_TYPE_CRASH 1 +#define KEXEC_TYPE_QUICK 2 unsigned int preserve_context : 1; /* If set, we are using file mode kexec syscall */ unsigned int file_mode:1; @@ -296,6 +297,11 @@ extern int kexec_load_disabled; #define KEXEC_FLAGS (KEXEC_ON_CRASH | KEXEC_PRESERVE_CONTEXT) #endif
+#ifdef CONFIG_QUICK_KEXEC +#undef KEXEC_FLAGS +#define KEXEC_FLAGS (KEXEC_ON_CRASH | KEXEC_QUICK) +#endif + /* List of defined/legal kexec file flags */ #define KEXEC_FILE_FLAGS (KEXEC_FILE_UNLOAD | KEXEC_FILE_ON_CRASH | \ KEXEC_FILE_NO_INITRAMFS) @@ -305,6 +311,9 @@ extern int kexec_load_disabled; extern struct resource crashk_res; extern struct resource crashk_low_res; extern note_buf_t __percpu *crash_notes; +#ifdef CONFIG_QUICK_KEXEC +extern struct resource quick_kexec_res; +#endif
/* flag to track if kexec reboot is in progress */ extern bool kexec_in_progress; diff --git a/include/uapi/linux/kexec.h b/include/uapi/linux/kexec.h index 6d11286..ca3cebe 100644 --- a/include/uapi/linux/kexec.h +++ b/include/uapi/linux/kexec.h @@ -12,6 +12,7 @@ /* kexec flags for different usage scenarios */ #define KEXEC_ON_CRASH 0x00000001 #define KEXEC_PRESERVE_CONTEXT 0x00000002 +#define KEXEC_QUICK 0x00000004 #define KEXEC_ARCH_MASK 0xffff0000
/* diff --git a/kernel/kexec.c b/kernel/kexec.c index 6855980..47dfad7 100644 --- a/kernel/kexec.c +++ b/kernel/kexec.c @@ -46,6 +46,9 @@ static int kimage_alloc_init(struct kimage **rimage, unsigned long entry, int ret; struct kimage *image; bool kexec_on_panic = flags & KEXEC_ON_CRASH; +#ifdef CONFIG_QUICK_KEXEC + bool kexec_on_quick = flags & KEXEC_QUICK; +#endif
if (kexec_on_panic) { /* Verify we have a valid entry point */ @@ -71,6 +74,13 @@ static int kimage_alloc_init(struct kimage **rimage, unsigned long entry, image->type = KEXEC_TYPE_CRASH; }
+#ifdef CONFIG_QUICK_KEXEC + if (kexec_on_quick) { + image->control_page = quick_kexec_res.start; + image->type = KEXEC_TYPE_QUICK; + } +#endif + ret = sanity_check_segment_list(image); if (ret) goto out_free_image; diff --git a/kernel/kexec_core.c b/kernel/kexec_core.c index b36c9c4..595a757 100644 --- a/kernel/kexec_core.c +++ b/kernel/kexec_core.c @@ -74,6 +74,16 @@ struct resource crashk_low_res = { .desc = IORES_DESC_CRASH_KERNEL };
+#ifdef CONFIG_QUICK_KEXEC +struct resource quick_kexec_res = { + .name = "Quick kexec", + .start = 0, + .end = 0, + .flags = IORESOURCE_BUSY | IORESOURCE_SYSTEM_RAM, + .desc = IORES_DESC_QUICK_KEXEC +}; +#endif + int kexec_should_crash(struct task_struct *p) { /* @@ -470,8 +480,10 @@ static struct page *kimage_alloc_normal_control_pages(struct kimage *image, return pages; }
-static struct page *kimage_alloc_crash_control_pages(struct kimage *image, - unsigned int order) + +static struct page *kimage_alloc_special_control_pages(struct kimage *image, + unsigned int order, + unsigned long end) { /* Control pages are special, they are the intermediaries * that are needed while we copy the rest of the pages @@ -501,7 +513,7 @@ static struct page *kimage_alloc_crash_control_pages(struct kimage *image, size = (1 << order) << PAGE_SHIFT; hole_start = (image->control_page + (size - 1)) & ~(size - 1); hole_end = hole_start + size - 1; - while (hole_end <= crashk_res.end) { + while (hole_end <= end) { unsigned long i;
cond_resched(); @@ -536,7 +548,6 @@ static struct page *kimage_alloc_crash_control_pages(struct kimage *image, return pages; }
- struct page *kimage_alloc_control_pages(struct kimage *image, unsigned int order) { @@ -547,8 +558,15 @@ struct page *kimage_alloc_control_pages(struct kimage *image, pages = kimage_alloc_normal_control_pages(image, order); break; case KEXEC_TYPE_CRASH: - pages = kimage_alloc_crash_control_pages(image, order); + pages = kimage_alloc_special_control_pages(image, order, + crashk_res.end); + break; +#ifdef CONFIG_QUICK_KEXEC + case KEXEC_TYPE_QUICK: + pages = kimage_alloc_special_control_pages(image, order, + quick_kexec_res.end); break; +#endif }
return pages; @@ -898,11 +916,11 @@ static int kimage_load_normal_segment(struct kimage *image, return result; }
-static int kimage_load_crash_segment(struct kimage *image, +static int kimage_load_special_segment(struct kimage *image, struct kexec_segment *segment) { - /* For crash dumps kernels we simply copy the data from - * user space to it's destination. + /* For crash dumps kernels and quick kexec kernels + * we simply copy the data from user space to it's destination. * We do things a page at a time for the sake of kmap. */ unsigned long maddr; @@ -976,8 +994,13 @@ int kimage_load_segment(struct kimage *image, result = kimage_load_normal_segment(image, segment); break; case KEXEC_TYPE_CRASH: - result = kimage_load_crash_segment(image, segment); + result = kimage_load_special_segment(image, segment); + break; +#ifdef CONFIG_QUICK_KEXEC + case KEXEC_TYPE_QUICK: + result = kimage_load_special_segment(image, segment); break; +#endif }
return result;
euleros inclusion category: feature bugzilla: NA issues: #I1RC8Z CVE: NA
Reserve memory for quick kexec on arm64 via the "quickkexec=" command-line parameter.
Signed-off-by: Sang Yan sangyan@huawei.com --- arch/arm64/kernel/setup.c | 6 ++++++ arch/arm64/mm/init.c | 43 +++++++++++++++++++++++++++++++++++++++++++ 2 files changed, 49 insertions(+)
diff --git a/arch/arm64/kernel/setup.c b/arch/arm64/kernel/setup.c index 155b8a6..e18ec7f 100644 --- a/arch/arm64/kernel/setup.c +++ b/arch/arm64/kernel/setup.c @@ -260,6 +260,12 @@ static void __init request_standard_resources(void) crashk_res.end <= res->end) request_resource(res, &crashk_res); #endif +#ifdef CONFIG_QUICK_KEXEC + if (quick_kexec_res.end && + quick_kexec_res.start >= res->start && + quick_kexec_res.end <= res->end) + request_resource(res, &quick_kexec_res); +#endif
for (j = 0; j < res_mem_count; j++) { if (res_resources[j].start >= res->start && diff --git a/arch/arm64/mm/init.c b/arch/arm64/mm/init.c index e43764d..075deea 100644 --- a/arch/arm64/mm/init.c +++ b/arch/arm64/mm/init.c @@ -192,6 +192,45 @@ static void __init kexec_reserve_crashkres_pages(void) } #endif /* CONFIG_KEXEC_CORE */
+#ifdef CONFIG_QUICK_KEXEC +static int __init parse_quick_kexec(char *p) +{ + if (!p) + return 0; + + quick_kexec_res.end = PAGE_ALIGN(memparse(p, NULL)); + + return 0; +} +early_param("quickkexec", parse_quick_kexec); + +static void __init reserve_quick_kexec(void) +{ + unsigned long long mem_start, mem_len; + + mem_len = quick_kexec_res.end; + if (mem_len == 0) + return; + + /* Current arm64 boot protocol requires 2MB alignment */ + mem_start = memblock_find_in_range(0, ARCH_LOW_ADDRESS_LIMIT, + mem_len, CRASH_ALIGN); + if (mem_start == 0) { + pr_warn("cannot allocate quick kexec mem (size:0x%llx)\n", + mem_len); + quick_kexec_res.end = 0; + return; + } + + memblock_reserve(mem_start, mem_len); + pr_info("quick kexec mem reserved: 0x%016llx - 0x%016llx (%lld MB)\n", + mem_start, mem_start + mem_len, mem_len >> 20); + + quick_kexec_res.start = mem_start; + quick_kexec_res.end = mem_start + mem_len - 1; +} +#endif + #ifdef CONFIG_CRASH_DUMP static int __init early_init_dt_scan_elfcorehdr(unsigned long node, const char *uname, int depth, void *data) @@ -582,6 +621,10 @@ void __init arm64_memblock_init(void)
reserve_crashkernel();
+#ifdef CONFIG_QUICK_KEXEC + reserve_quick_kexec(); +#endif + reserve_elfcorehdr();
high_memory = __va(memblock_end_of_DRAM() - 1) + 1;