From: Felix Fu <fuzhen5@huawei.com>
hulk inclusion
category: feature
bugzilla: https://gitee.com/src-openeuler/kernel/issues/IAI881
CVE: NA
-----------------------------------------
This reverts commit 08c92891c43701e0e68ace8ebf95eaeebb61142a.
Signed-off-by: Felix Fu <fuzhen5@huawei.com>
---
 arch/arm/Kconfig       |  12 -----
 arch/arm/kernel/head.S | 102 ++++-------------------------------------
 2 files changed, 9 insertions(+), 105 deletions(-)
diff --git a/arch/arm/Kconfig b/arch/arm/Kconfig
index 2483ce304375..a82bef783210 100644
--- a/arch/arm/Kconfig
+++ b/arch/arm/Kconfig
@@ -1458,18 +1458,6 @@ config RELOCATABLE
 	depends on !XIP_KERNEL && !JUMP_LABEL
 	select HAVE_ARCH_PREL32_RELOCATIONS
 
-config RANDOMIZE_BASE
-	bool "Randomize the address of the kernel image"
-	depends on MMU && AUTO_ZRELADDR
-	depends on !XIP_KERNEL && !ZBOOT_ROM && !JUMP_LABEL
-	select RELOCATABLE
-	select ARM_MODULE_PLTS if MODULES
-	select MODULE_REL_CRCS if MODVERSIONS
-	help
-	  Randomizes the virtual and physical address at which the kernel
-	  image is loaded, as a security feature that deters exploit attempts
-	  relying on knowledge of the location of kernel internals.
-
 endmenu
 
menu "Boot options" diff --git a/arch/arm/kernel/head.S b/arch/arm/kernel/head.S index 13a84864d771..d3a1d1dff637 100644 --- a/arch/arm/kernel/head.S +++ b/arch/arm/kernel/head.S @@ -61,28 +61,6 @@ kernel_sec_end: .long 0 .popsection
-	.macro	get_kaslr_offset, reg
-#ifdef CONFIG_RANDOMIZE_BASE
-	ldr_l	\reg, __kaslr_offset
-#else
-	mov	\reg, #0
-#endif
-	.endm
-
-	.macro	add_kaslr_offset, reg, tmp
-#ifdef CONFIG_RANDOMIZE_BASE
-	get_kaslr_offset	\tmp
-	add	\reg, \reg, \tmp
-#endif
-	.endm
-
-	.macro	sub_kaslr_offset, reg, tmp
-#ifdef CONFIG_RANDOMIZE_BASE
-	get_kaslr_offset	\tmp
-	sub	\reg, \reg, \tmp
-#endif
-	.endm
-
 /*
  * Kernel startup entry point.
  * ---------------------------
@@ -108,7 +86,6 @@ kernel_sec_end:
 	.equ	swapper_pg_dir, . - PG_DIR_SIZE
 
 ENTRY(stext)
-	mov	r3, #0			@ normal entry point - clear r3
 	ARM_BE8(setend	be )		@ ensure we are in BE8 mode
 
 THUMB(	badr	r9, 1f		)	@ Kernel is always entered in ARM.
@@ -116,16 +93,6 @@ ENTRY(stext)
 THUMB(	.thumb			)	@ switch to Thumb now.
 THUMB(1:			)
 
-#ifdef CONFIG_RANDOMIZE_BASE
-	str_l	r3, __kaslr_offset, r9	@ offset in r3 if entered via kaslr ep
-
-	.section ".bss", "aw", %nobits
-	.align	2
-__kaslr_offset:
-	.long	0			@ will be wiped before entering C code
-	.previous
-#endif
-
 #ifdef CONFIG_ARM_VIRT_EXT
 	bl	__hyp_stub_install
 #endif
@@ -149,7 +116,6 @@ __kaslr_offset:
 #ifndef CONFIG_XIP_KERNEL
 	adr_l	r8, _text		@ __pa(_text)
 	sub	r8, r8, #TEXT_OFFSET	@ PHYS_OFFSET
-	sub_kaslr_offset r8, r12
 #else
 	ldr	r8, =PLAT_PHYS_OFFSET	@ always constant in this case
 #endif
@@ -186,8 +152,8 @@ __kaslr_offset:
 	 * r0 will hold the CPU control register value, r1, r2, r4, and
 	 * r9 will be preserved.  r5 will also be preserved if LPAE.
 	 */
-	adr_l	lr, __primary_switch	@ address to jump to after
-	mov	r13, lr			@ mmu has been enabled
+	ldr	r13, =__mmap_switched	@ address to jump to after
+					@ mmu has been enabled
 	badr	lr, 1f			@ return (PIC) address
 #ifdef CONFIG_ARM_LPAE
 	mov	r5, #0			@ high TTBR0
@@ -198,8 +164,7 @@ __kaslr_offset:
 	ldr	r12, [r10, #PROCINFO_INITFUNC]
 	add	r12, r12, r10
 	ret	r12
-1:	get_kaslr_offset r12		@ get before turning MMU on
-	b	__enable_mmu
+1:	b	__enable_mmu
 ENDPROC(stext)
 	.ltorg
 
@@ -280,20 +245,15 @@ __create_page_tables:
 	 * set two variables to indicate the physical start and end of the
 	 * kernel.
 	 */
-	get_kaslr_offset r3
-	add	r0, r3, #PAGE_OFFSET
-	add	r0, r4, r0, lsr #(SECTION_SHIFT - PMD_ENTRY_ORDER)
-	adr_l	r6, _end - 1
-	sub	r6, r6, r8
-	add	r6, r6, #PAGE_OFFSET
-	add	r3, r3, r8
+	add	r0, r4, #KERNEL_OFFSET >> (SECTION_SHIFT - PMD_ENTRY_ORDER)
+	ldr	r6, =(_end - 1)
 	adr_l	r5, kernel_sec_start	@ _pa(kernel_sec_start)
 #if defined CONFIG_CPU_ENDIAN_BE8 || defined CONFIG_CPU_ENDIAN_BE32
 	str	r8, [r5, #4]		@ Save physical start of kernel (BE)
 #else
 	str	r8, [r5]		@ Save physical start of kernel (LE)
 #endif
-	orr	r3, r3, r7
+	orr	r3, r8, r7		@ Add the MMU flags
 	add	r6, r4, r6, lsr #(SECTION_SHIFT - PMD_ENTRY_ORDER)
 1:	str	r3, [r0], #1 << PMD_ENTRY_ORDER
 	add	r3, r3, #1 << SECTION_SHIFT
@@ -443,7 +403,7 @@ ENTRY(secondary_startup)
 	 * Use the page tables supplied from __cpu_up.
 	 */
 	adr_l	r3, secondary_data
-	mov_l	r12, __secondary_switch
+	mov_l	r12, __secondary_switched
 	ldrd	r4, r5, [r3, #0]	@ get secondary_data.pgdir
 ARM_BE8(eor	r4, r4, r5)		@ Swap r5 and r4 in BE:
 ARM_BE8(eor	r5, r4, r5)		@ it can be done in 3 steps
@@ -489,7 +449,6 @@ ENDPROC(__secondary_switched)
  * r4  = TTBR pointer (low word)
  * r5  = TTBR pointer (high word if LPAE)
  * r9  = processor ID
- * r12 = KASLR offset
  * r13 = *virtual* address to jump to upon completion
  */
 __enable_mmu:
@@ -527,7 +486,6 @@ ENDPROC(__enable_mmu)
  *  r1  = machine ID
  *  r2  = atags or dtb pointer
  *  r9  = processor ID
- * r12 = KASLR offset
  *  r13 = *virtual* address to jump to upon completion
  *
  * other registers depend on the function called upon completion
@@ -543,53 +501,11 @@ ENTRY(__turn_mmu_on)
 	mov	r3, r3
 	mov	r3, r13
 	ret	r3
-ENDPROC(__turn_mmu_on)
-
-__primary_switch:
-#ifdef CONFIG_RELOCATABLE
-	adr_l	r7, _text		@ r7 := __pa(_text)
-	sub	r7, r7, #TEXT_OFFSET	@ r7 := PHYS_OFFSET
-
-	adr_l	r5, __rel_begin
-	adr_l	r6, __rel_end
-	sub	r5, r5, r7
-	sub	r6, r6, r7
-
-	add	r5, r5, #PAGE_OFFSET
-	add	r6, r6, #PAGE_OFFSET
-	add	r5, r5, r12
-	add	r6, r6, r12
-
-	adr_l	r3, __stubs_start	@ __pa(__stubs_start)
-	sub	r3, r3, r7		@ offset of __stubs_start
-	add	r3, r3, #PAGE_OFFSET	@ __va(__stubs_start)
-	sub	r3, r3, #0xffff1000	@ subtract VA of stubs section
-
-0:	cmp	r5, r6
-	bge	1f
-	ldm	r5!, {r7, r8}		@ load next relocation entry
-	cmp	r8, #23			@ R_ARM_RELATIVE
-	bne	0b
-	cmp	r7, #0xff000000		@ vector page?
-	addgt	r7, r7, r3		@ fix up VA offset
-	ldr	r8, [r7, r12]
-	add	r8, r8, r12
-	str	r8, [r7, r12]
-	b	0b
-1:
-#endif
-	ldr	pc, =__mmap_switched
-ENDPROC(__primary_switch)
-
-#ifdef CONFIG_SMP
-__secondary_switch:
-	ldr	pc, =__secondary_switched
-ENDPROC(__secondary_switch)
-#endif
-	.ltorg
 __turn_mmu_on_end:
+ENDPROC(__turn_mmu_on)
 	.popsection
+
 #ifdef CONFIG_SMP_ON_UP
 	__HEAD
 __fixup_smp: