From: Ard Biesheuvel <ard.biesheuvel@linaro.org>
maillist inclusion
commit 11f8bbc5b0d4d76b3d7114bf9af1805607a20372
category: feature
feature: ARM kaslr support
bugzilla: https://gitee.com/openeuler/kernel/issues/I8KNA9
CVE: NA
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/ardb/linux.git/commit/?h=arm...
-------------------------------------------------
The location of the ARM vector table in virtual memory is not a compile time constant, and so the virtual addresses of the various entry points are rather meaningless (although they are most likely to reside at the offsets below):
  ffff1004 t vector_rst
  ffff1020 t vector_irq
  ffff10a0 t vector_dabt
  ffff1120 t vector_pabt
  ffff11a0 t vector_und
  ffff1220 t vector_addrexcptn
  ffff1240 T vector_fiq
However, when running with KASLR enabled, the virtual addresses are subject to runtime relocation, which means we should avoid taking absolute references to these symbols, not only directly (by taking the address in C code), but also via /proc/kallsyms or other kernel facilities that deal with ELF symbols. For instance, /proc/kallsyms will list their addresses as:
  0abf1004 t vector_rst
  0abf1020 t vector_irq
  0abf10a0 t vector_dabt
  0abf1120 t vector_pabt
  0abf11a0 t vector_und
  0abf1220 t vector_addrexcptn
  0abf1240 T vector_fiq
when running randomized, which may confuse tools like perf that use /proc/kallsyms to annotate stack traces.
So use .L prefixes for these symbols. This will prevent them from being visible at all outside the assembler source.
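(A standalone sketch of the mechanism, not part of this patch: the GNU assembler treats any label that starts with .L as a local label, resolves references to it at assembly time, and never emits it into the ELF symbol table. The label names below are made up for illustration only:

	.text
	.global	visible_entry		@ emitted into the symbol table
visible_entry:
	b	.Lhidden_loop		@ resolved at assembly time

.Lhidden_loop:				@ .L label: dropped by the assembler,
	b	.Lhidden_loop		@ so invisible to nm and /proc/kallsyms

Assembling this and running nm on the object lists visible_entry only.)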
Conflicts:
	arch/arm/include/asm/vmlinux.lds.h
	arch/arm/kernel/entry-armv.S
Signed-off-by: Ard Biesheuvel <ard.biesheuvel@linaro.org>
Signed-off-by: Cui GaoSheng <cuigaosheng1@huawei.com>
Reviewed-by: Xiu Jianfeng <xiujianfeng@huawei.com>
Signed-off-by: Zheng Zengkai <zhengzengkai@huawei.com>
Signed-off-by: Felix Fu <fuzhen5@huawei.com>
---
 arch/arm/include/asm/vmlinux.lds.h |  2 --
 arch/arm/kernel/entry-armv.S       | 40 ++++++++++++++++--------------
 2 files changed, 21 insertions(+), 21 deletions(-)
diff --git a/arch/arm/include/asm/vmlinux.lds.h b/arch/arm/include/asm/vmlinux.lds.h
index 579becda9453..825af9c65db2 100644
--- a/arch/arm/include/asm/vmlinux.lds.h
+++ b/arch/arm/include/asm/vmlinux.lds.h
@@ -152,8 +152,6 @@
 	ARM_LMA(__stubs, .stubs);					\
 	. = __stubs_lma + SIZEOF(.stubs);				\
 									\
-	PROVIDE(vector_fiq_offset = vector_fiq - ADDR(.vectors));
-
 #define ARM_TCM \
 	__itcm_start = ALIGN(4);					\
 	.text_itcm ITCM_OFFSET : AT(__itcm_start - LOAD_OFFSET) {	\
diff --git a/arch/arm/kernel/entry-armv.S b/arch/arm/kernel/entry-armv.S
index 6150a716828c..10b84539d83a 100644
--- a/arch/arm/kernel/entry-armv.S
+++ b/arch/arm/kernel/entry-armv.S
@@ -853,7 +853,7 @@ vector_bhb_bpiall_\name:
 	@ which gives a "context synchronisation".
 #endif
-vector_\name:
+.Lvector_\name:
 	.if \correction
 	sub	lr, lr, #\correction
 	.endif
@@ -882,7 +882,7 @@ vector_\name:
 	mov	r0, sp
 ARM(	ldr	lr, [pc, lr, lsl #2]	)
 	movs	pc, lr			@ branch to handler in SVC mode
-ENDPROC(vector_\name)
+ENDPROC(.Lvector_\name)
 #ifdef CONFIG_HARDEN_BRANCH_HISTORY
 	.subsection 1
@@ -914,6 +914,10 @@ ENDPROC(vector_bhb_loop8_\name)
 	.endm
 	.section .stubs, "ax", %progbits
+#ifdef CONFIG_FIQ
+	.global vector_fiq_offset
+	.set	vector_fiq_offset, .Lvector_fiq - . + 0x1000
+#endif
 	@ These need to remain at the start of the section so that
 	@ they are in range of the 'SWI' entries in the vector tables
 	@ located 4k down.
@@ -926,11 +930,11 @@ ENDPROC(vector_bhb_loop8_\name)
 	.word vector_bhb_bpiall_swi
 #endif
-vector_rst:
+.Lvector_rst:
 ARM(	swi	SYS_ERROR0	)
 THUMB(	svc	#0		)
 THUMB(	nop			)
-	b	vector_und
+	b	.Lvector_und
 /*
  * Interrupt dispatcher
@@ -1032,8 +1036,8 @@ vector_rst:
  * (they're not supposed to happen, and won't happen in 32-bit data mode).
  */
-vector_addrexcptn:
-	b	vector_addrexcptn
+.Lvector_addrexcptn:
+	b	.Lvector_addrexcptn
 /*=============================================================================
  * FIQ "NMI" handler
@@ -1062,42 +1066,40 @@ vector_addrexcptn:
 	.long	__fiq_svc			@  e
 	.long	__fiq_svc			@  f
-	.globl	vector_fiq
-
 	.section .vectors, "ax", %progbits
-	W(b)	vector_rst
-	W(b)	vector_und
+	W(b)	.Lvector_rst
+	W(b)	.Lvector_und
 ARM(	.reloc	., R_ARM_LDR_PC_G0, .L__vector_swi	)
 THUMB(	.reloc	., R_ARM_THM_PC12, .L__vector_swi	)
 	W(ldr)	pc, .
-	W(b)	vector_pabt
-	W(b)	vector_dabt
-	W(b)	vector_addrexcptn
-	W(b)	vector_irq
-	W(b)	vector_fiq
+	W(b)	.Lvector_pabt
+	W(b)	.Lvector_dabt
+	W(b)	.Lvector_addrexcptn
+	W(b)	.Lvector_irq
+	W(b)	.Lvector_fiq
 #ifdef CONFIG_HARDEN_BRANCH_HISTORY
 	.section .vectors.bhb.loop8, "ax", %progbits
-	W(b)	vector_rst
+	W(b)	.Lvector_rst
 	W(b)	vector_bhb_loop8_und
 ARM(	.reloc	., R_ARM_LDR_PC_G0, .L__vector_bhb_loop8_swi	)
 THUMB(	.reloc	., R_ARM_THM_PC12, .L__vector_bhb_loop8_swi	)
 	W(ldr)	pc, .
 	W(b)	vector_bhb_loop8_pabt
 	W(b)	vector_bhb_loop8_dabt
-	W(b)	vector_addrexcptn
+	W(b)	.Lvector_addrexcptn
 	W(b)	vector_bhb_loop8_irq
 	W(b)	vector_bhb_loop8_fiq
 	.section .vectors.bhb.bpiall, "ax", %progbits
-	W(b)	vector_rst
+	W(b)	.Lvector_rst
 	W(b)	vector_bhb_bpiall_und
 ARM(	.reloc	., R_ARM_LDR_PC_G0, .L__vector_bhb_bpiall_swi	)
 THUMB(	.reloc	., R_ARM_THM_PC12, .L__vector_bhb_bpiall_swi	)
 	W(ldr)	pc, .
 	W(b)	vector_bhb_bpiall_pabt
 	W(b)	vector_bhb_bpiall_dabt
-	W(b)	vector_addrexcptn
+	W(b)	.Lvector_addrexcptn
 	W(b)	vector_bhb_bpiall_irq
 	W(b)	vector_bhb_bpiall_fiq
 #endif
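A side note on the vector_fiq_offset hunk above: a .L label cannot be referenced from the linker script, which is why the PROVIDE() in vmlinux.lds.h is replaced with a .global/.set pair in the assembler, exporting only a position-independent offset. A minimal sketch of that pattern, with illustrative symbol names that are not from the kernel:

	.text
.Lregion_start:				@ local labels: absent from the symbol table
	.space	64
.Lregion_end:

	@ The difference between two local labels in the same section is an
	@ assembly-time constant; .set binds it to a name and .global exports
	@ that name, so consumers see a stable offset rather than an address
	@ that moves under randomization.
	.global	region_size
	.set	region_size, .Lregion_end - .Lregion_start

Running nm on the resulting object shows region_size as an absolute symbol while neither .L label appears, which is why vector_fiq_offset stays meaningful under KASLR even though .Lvector_fiq itself is hidden.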