From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
maillist inclusion
commit c11744cd7b351b0fbc5233c04c32822544c96fc1
category: feature
feature: ARM kaslr support
bugzilla: https://gitee.com/openeuler/kernel/issues/I8KNA9
CVE: NA
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/ardb/linux.git/commit/?h=arm...
Make the RANDOMIZE_BASE Kconfig option depend on !JUMP_LABEL to resolve the compilation conflict between -fpic and JUMP_LABEL.
Update PMD_ORDER to PMD_ENTRY_ORDER in OLK-6.6.
-------------------------------------------------
This implements randomization of the placement of the kernel image inside the lowmem region. It is intended to work together with the decompressor to place the kernel at an offset in physical memory that is a multiple of 2 MB, and to take the same offset into account when creating the virtual mapping.
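As a minimal illustration (a hypothetical helper, not code from this patch; pick_kaslr_offset, seed, lowmem_size and kernel_size are made-up names), the offset selection could look roughly like this in C:

  /*
   * Hypothetical sketch only: pick a physical KASLR offset that is a
   * multiple of 2 MiB and keeps the image inside the lowmem region.
   */
  #define SZ_2M   0x00200000UL

  static unsigned long pick_kaslr_offset(unsigned long seed,
                                         unsigned long lowmem_size,
                                         unsigned long kernel_size)
  {
          /* number of 2 MiB slots the image can be placed into */
          unsigned long slots = (lowmem_size - kernel_size) / SZ_2M;

          if (!slots)
                  return 0;       /* no room to randomize */

          /* always a whole number of 2 MiB sections */
          return (seed % slots) * SZ_2M;
  }

The same offset is applied again when the virtual mapping is created, so the delta between physical and virtual addresses does not change.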
This uses runtime relocation of the kernel built as a PIE binary, to fix up all absolute symbol references to refer to their runtime virtual address. The physical-to-virtual mapping remains unchanged.
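For reference, the following C sketch mirrors the R_ARM_RELATIVE fixup loop that the new __primary_switch assembly below performs (simplified: the vector-page special case is omitted, and the function and struct names are illustrative only):

  #include <stdint.h>

  #define R_ARM_RELATIVE  23

  struct elf32_rel {
          uint32_t r_offset;      /* link-time VA of the word to patch */
          uint32_t r_info;        /* relocation type (symbol index is 0) */
  };

  static void apply_rel_fixups(struct elf32_rel *rel, struct elf32_rel *end,
                               uint32_t kaslr_offset)
  {
          for (; rel < end; rel++) {
                  if (rel->r_info != R_ARM_RELATIVE)
                          continue;
                  /* the word lives at its randomized VA ... */
                  uint32_t *place = (uint32_t *)(rel->r_offset + kaslr_offset);
                  /* ... and the absolute address it holds is shifted too */
                  *place += kaslr_offset;
          }
  }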
To allow the decompressor to hand over to the core kernel without making assumptions that do not necessarily hold when the core kernel is invoked directly by bootloaders that are not KASLR aware, the KASLR offset is expected to be passed in r3 when the kernel is entered 4 bytes past its entry point, skipping the first instruction.
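To illustrate that contract (a hypothetical loader-side helper, not code from this patch or from the decompressor), a KASLR-aware loader could hand over as follows:

  /*
   * Hypothetical sketch: r0/r1/r2 follow the normal ARM boot protocol,
   * r3 carries the chosen KASLR offset, and the branch target is
   * stext + 4 so that the "mov r3, #0" at the normal entry point is
   * skipped.
   */
  static void __attribute__((noreturn))
  enter_kernel_kaslr(unsigned long stext_pa, unsigned long kaslr_offset,
                     unsigned long machine_nr, unsigned long dtb_pa)
  {
          register unsigned long r0 asm("r0") = 0;
          register unsigned long r1 asm("r1") = machine_nr;
          register unsigned long r2 asm("r2") = dtb_pa;
          register unsigned long r3 asm("r3") = kaslr_offset;

          asm volatile("bx      %0"
                       :
                       : "r" (stext_pa + 4),
                         "r" (r0), "r" (r1), "r" (r2), "r" (r3)
                       : "memory");
          __builtin_unreachable();
  }

A loader that is not KASLR aware simply jumps to the entry point itself; the first instruction then clears r3, so the kernel boots at offset zero.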
Conflicts: arch/arm/kernel/head.S
Cc: Russell King <linux@armlinux.org.uk>
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Cui GaoSheng <cuigaosheng1@huawei.com>
Reviewed-by: Xiu Jianfeng <xiujianfeng@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Signed-off-by: Felix Fu <fuzhen5@huawei.com>
---
 arch/arm/Kconfig       |  12 +++++
 arch/arm/kernel/head.S | 102 +++++++++++++++++++++++++++++++++++++----
 2 files changed, 105 insertions(+), 9 deletions(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index a29613b7ea8b..4d23dc5d7867 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1457,6 +1457,18 @@ config RELOCATABLE
 	depends on !XIP_KERNEL && !JUMP_LABEL
 	select HAVE_ARCH_PREL32_RELOCATIONS
 
+config RANDOMIZE_BASE
+	bool "Randomize the address of the kernel image"
+	depends on MMU && AUTO_ZRELADDR
+	depends on !XIP_KERNEL && !ZBOOT_ROM && !JUMP_LABEL
+	select RELOCATABLE
+	select ARM_MODULE_PLTS if MODULES
+	select MODULE_REL_CRCS if MODVERSIONS
+	help
+	  Randomizes the virtual and physical address at which the kernel
+	  image is loaded, as a security feature that deters exploit attempts
+	  relying on knowledge of the location of kernel internals.
+
 endmenu
menu "Boot options" diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index 0375154c1b70..e54e0edc36d3 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S @@ -69,6 +69,28 @@ kernel_sec_end: sub \rd, \rd, #PG_DIR_SIZE .endm
+	.macro	get_kaslr_offset, reg
+#ifdef CONFIG_RANDOMIZE_BASE
+	ldr_l	\reg, __kaslr_offset
+#else
+	mov	\reg, #0
+#endif
+	.endm
+
+	.macro	add_kaslr_offset, reg, tmp
+#ifdef CONFIG_RANDOMIZE_BASE
+	get_kaslr_offset	\tmp
+	add	\reg, \reg, \tmp
+#endif
+	.endm
+
+	.macro	sub_kaslr_offset, reg, tmp
+#ifdef CONFIG_RANDOMIZE_BASE
+	get_kaslr_offset	\tmp
+	sub	\reg, \reg, \tmp
+#endif
+	.endm
+
 /*
  * Kernel startup entry point.
  * ---------------------------
@@ -94,6 +116,7 @@ kernel_sec_end:
 	.equ	swapper_pg_dir, . - PG_DIR_SIZE
 
 ENTRY(stext)
+	mov	r3, #0			@ normal entry point - clear r3
 ARM_BE8(setend	be )			@ ensure we are in BE8 mode
 
 THUMB(	badr	r9, 1f		)	@ Kernel is always entered in ARM.
@@ -101,6 +124,16 @@ ENTRY(stext)
 THUMB(	.thumb			)	@ switch to Thumb now.
 THUMB(1:			)
 
+#ifdef CONFIG_RANDOMIZE_BASE
+	str_l	r3, __kaslr_offset, r9	@ offset in r3 if entered via kaslr ep
+
+	.section ".bss", "aw", %nobits
+	.align	2
+__kaslr_offset:
+	.long	0			@ will be wiped before entering C code
+	.previous
+#endif
+
 #ifdef CONFIG_ARM_VIRT_EXT
 	bl	__hyp_stub_install
 #endif
@@ -124,6 +157,7 @@ ENTRY(stext)
 #ifndef CONFIG_XIP_KERNEL
 	adr_l	r8, _text		@ __pa(_text)
 	sub	r8, r8, #TEXT_OFFSET	@ PHYS_OFFSET
+	sub_kaslr_offset r8, r12
 #else
 	ldr	r8, =PLAT_PHYS_OFFSET	@ always constant in this case
 #endif
@@ -160,8 +194,8 @@ ENTRY(stext)
 	 * r0 will hold the CPU control register value, r1, r2, r4, and
 	 * r9 will be preserved. r5 will also be preserved if LPAE.
 	 */
-	ldr	r13, =__mmap_switched	@ address to jump to after
-					@ mmu has been enabled
+	adr_l	lr, __primary_switch	@ address to jump to after
+	mov	r13, lr			@ mmu has been enabled
 	badr	lr, 1f			@ return (PIC) address
#ifdef CONFIG_ARM_LPAE
 	mov	r5, #0			@ high TTBR0
@@ -172,7 +206,8 @@ ENTRY(stext)
 	ldr	r12, [r10, #PROCINFO_INITFUNC]
 	add	r12, r12, r10
 	ret	r12
-1:	b	__enable_mmu
+1:	get_kaslr_offset r12		@ get before turning MMU on
+	b	__enable_mmu
 ENDPROC(stext)
 	.ltorg
@@ -253,15 +288,20 @@ __create_page_tables:
 	 * set two variables to indicate the physical start and end of the
 	 * kernel.
 	 */
-	add	r0, r4, #KERNEL_OFFSET >> (SECTION_SHIFT - PMD_ENTRY_ORDER)
-	ldr	r6, =(_end - 1)
+	get_kaslr_offset r3
+	add	r0, r3, #PAGE_OFFSET
+	add	r0, r4, r0, lsr #(SECTION_SHIFT - PMD_ENTRY_ORDER)
+	adr_l	r6, _end - 1
+	sub	r6, r6, r8
+	add	r6, r6, #PAGE_OFFSET
+	add	r3, r3, r8
 	adr_l	r5, kernel_sec_start	@ _pa(kernel_sec_start)
 #if defined CONFIG_CPU_ENDIAN_BE8 || defined CONFIG_CPU_ENDIAN_BE32
 	str	r8, [r5, #4]		@ Save physical start of kernel (BE)
 #else
 	str	r8, [r5]		@ Save physical start of kernel (LE)
 #endif
-	orr	r3, r8, r7		@ Add the MMU flags
+	orr	r3, r3, r7
 	add	r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ENTRY_ORDER)
 1:	str	r3, [r0], #1 << PMD_ENTRY_ORDER
 	add	r3, r3, #1 << SECTION_SHIFT
@@ -411,7 +451,7 @@ ENTRY(secondary_startup)
 	 * Use the page tables supplied from __cpu_up.
 	 */
 	adr_l	r3, secondary_data
-	mov_l	r12, __secondary_switched
+	mov_l	r12, __secondary_switch
 	ldrd	r4, r5, [r3, #0]	@ get secondary_data.pgdir
 ARM_BE8(eor	r4, r4, r5)		@ Swap r5 and r4 in BE:
 ARM_BE8(eor	r5, r4, r5)		@ it can be done in 3 steps
@@ -457,6 +497,7 @@ ENDPROC(__secondary_switched)
  * r4  = TTBR pointer (low word)
  * r5  = TTBR pointer (high word if LPAE)
  * r9  = processor ID
+ * r12 = KASLR offset
  * r13 = *virtual* address to jump to upon completion
  */
 __enable_mmu:
@@ -494,6 +535,7 @@ ENDPROC(__enable_mmu)
  * r1  = machine ID
  * r2  = atags or dtb pointer
  * r9  = processor ID
+ * r12 = KASLR offset
  * r13 = *virtual* address to jump to upon completion
  *
  * other registers depend on the function called upon completion
@@ -509,10 +551,52 @@ ENTRY(__turn_mmu_on)
 	mov	r3, r3
 	mov	r3, r13
 	ret	r3
-__turn_mmu_on_end:
 ENDPROC(__turn_mmu_on)
-	.popsection
+__primary_switch:
+#ifdef CONFIG_RELOCATABLE
+	adr_l	r7, _text		@ r7 := __pa(_text)
+	sub	r7, r7, #TEXT_OFFSET	@ r7 := PHYS_OFFSET
+
+	adr_l	r5, __rel_begin
+	adr_l	r6, __rel_end
+	sub	r5, r5, r7
+	sub	r6, r6, r7
+
+	add	r5, r5, #PAGE_OFFSET
+	add	r6, r6, #PAGE_OFFSET
+	add	r5, r5, r12
+	add	r6, r6, r12
+
+	adr_l	r3, __stubs_start	@ __pa(__stubs_start)
+	sub	r3, r3, r7		@ offset of __stubs_start
+	add	r3, r3, #PAGE_OFFSET	@ __va(__stubs_start)
+	sub	r3, r3, #0xffff1000	@ subtract VA of stubs section
+
+0:	cmp	r5, r6
+	bge	1f
+	ldm	r5!, {r7, r8}		@ load next relocation entry
+	cmp	r8, #23			@ R_ARM_RELATIVE
+	bne	0b
+	cmp	r7, #0xff000000		@ vector page?
+	addgt	r7, r7, r3		@ fix up VA offset
+	ldr	r8, [r7, r12]
+	add	r8, r8, r12
+	str	r8, [r7, r12]
+	b	0b
+1:
+#endif
+	ldr	pc, =__mmap_switched
+ENDPROC(__primary_switch)
+
+#ifdef CONFIG_SMP
+__secondary_switch:
+	ldr	pc, =__secondary_switched
+ENDPROC(__secondary_switch)
+#endif
+	.ltorg
+__turn_mmu_on_end:
+	.popsection
 
 #ifdef CONFIG_SMP_ON_UP
 	__HEAD