hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I8MGE6
--------------------------------
Add breakpoint exception optimization support to improve the livepatch success rate for ppc64/ppc32.
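How this works, in brief: while a patch is being applied, the first
instruction of each old function is overwritten with a breakpoint. A task
that calls the function during the transition traps into do_program_check(),
where klp_brk_handler() looks up the registered new function via
klp_get_brk_func() and redirects the NIP to it; on ppc64 this goes through
the static livepatch_brk_trampoline, which loads the new entry point and TOC
through r11, while on ppc32 the NIP is set to the new function directly.
The call sequence expected from the common livepatch code is roughly the
sketch below (the caller and the func_node naming are assumptions shown
only for illustration; this patch adds just the powerpc side):

	/* while enabling the patch, for each patched old function */
	arch_klp_add_breakpoint(&func_node->arch_data, old_func);

	/* ... retry installing the long branch at the function entry ... */

	/* once the branch is in place, or on rollback */
	arch_klp_remove_breakpoint(&func_node->arch_data, old_func);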
Signed-off-by: Yang Jihong <yangjihong1@huawei.com>
Signed-off-by: Li Huafei <lihuafei1@huawei.com>
Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com>
---
 arch/powerpc/include/asm/livepatch.h  |  9 ++++++
 arch/powerpc/kernel/livepatch.c       | 40 +++++++++++++++++++++++++++
 arch/powerpc/kernel/livepatch_tramp.S | 34 +++++++++++++++++++++++
 arch/powerpc/kernel/traps.c           |  8 ++++++
 4 files changed, 91 insertions(+)
diff --git a/arch/powerpc/include/asm/livepatch.h b/arch/powerpc/include/asm/livepatch.h
index 3b2d089eb180..e83f0cc7b267 100644
--- a/arch/powerpc/include/asm/livepatch.h
+++ b/arch/powerpc/include/asm/livepatch.h
@@ -58,6 +58,7 @@ void livepatch_branch_stub(void);
 void livepatch_branch_stub_end(void);
 void livepatch_branch_trampoline(void);
 void livepatch_branch_trampoline_end(void);
+void livepatch_brk_trampoline(void);
 
 int livepatch_create_branch(unsigned long pc, unsigned long trampoline,
			    unsigned long addr, struct module *me);
@@ -75,6 +76,11 @@ struct arch_klp_data {
 #ifdef CONFIG_PPC64
	struct ppc64_klp_btramp_entry trampoline;
 #endif
+	/*
+	 * Saved opcode at the entry of the old func (which maybe replaced
+	 * with breakpoint).
+	 */
+	u32 saved_opcode;
 };
 
 #define KLP_MAX_REPLACE_SIZE sizeof_field(struct arch_klp_data, old_insns)
@@ -87,6 +93,9 @@ long arch_klp_save_old_code(struct arch_klp_data *arch_data, void *old_func);
 int arch_klp_check_calltrace(bool (*check_func)(void *, int *, unsigned long), void *data);
 bool arch_check_jump_insn(unsigned long func_addr);
 int klp_patch_text(u32 *dst, const u32 *src, int len);
+int klp_brk_handler(struct pt_regs *regs);
+int arch_klp_add_breakpoint(struct arch_klp_data *arch_data, void *old_func);
+void arch_klp_remove_breakpoint(struct arch_klp_data *arch_data, void *old_func);
 
 #endif /* CONFIG_LIVEPATCH_WO_FTRACE */
diff --git a/arch/powerpc/kernel/livepatch.c b/arch/powerpc/kernel/livepatch.c
index 09b405740cb5..c1f6c200d711 100644
--- a/arch/powerpc/kernel/livepatch.c
+++ b/arch/powerpc/kernel/livepatch.c
@@ -282,3 +282,43 @@ int klp_patch_text(u32 *dst, const u32 *src, int len)
	barrier();
	return patch_instruction(dst, ppc_inst(src[0]));
 }
+
+int arch_klp_add_breakpoint(struct arch_klp_data *arch_data, void *old_func)
+{
+	ppc_inst_t insn = ppc_inst_read((const u32 *)old_func);
+
+	arch_data->saved_opcode = ppc_inst_val(insn);
+	patch_instruction((u32 *)old_func, ppc_inst(BREAKPOINT_INSTRUCTION));
+	return 0;
+}
+
+void arch_klp_remove_breakpoint(struct arch_klp_data *arch_data, void *old_func)
+{
+	patch_instruction((u32 *)old_func, ppc_inst(arch_data->saved_opcode));
+}
+
+int klp_brk_handler(struct pt_regs *regs)
+{
+	void *brk_func = NULL;
+	unsigned long addr = regs->nip;
+
+	if (user_mode(regs))
+		return 0;
+
+	brk_func = klp_get_brk_func((void *)addr);
+	if (!brk_func)
+		return 0;
+
+#ifdef CONFIG_PPC64
+	/*
+	 * Only static trampoline can be used here to prevent
+	 * resource release caused by rollback.
+	 */
+	regs->gpr[PT_R11] = (unsigned long)brk_func;
+	regs->nip = ppc_function_entry((void *)livepatch_brk_trampoline);
+#else
+	regs->nip = (unsigned long)brk_func;
+#endif
+
+	return 1;
+}
diff --git a/arch/powerpc/kernel/livepatch_tramp.S b/arch/powerpc/kernel/livepatch_tramp.S
index d8baf8a98a97..643a3f6d1aae 100644
--- a/arch/powerpc/kernel/livepatch_tramp.S
+++ b/arch/powerpc/kernel/livepatch_tramp.S
@@ -90,3 +90,37 @@ _GLOBAL(livepatch_branch_trampoline)
	blr
 _GLOBAL(livepatch_branch_trampoline_end)
	nop
+
+/*
+ * This function is the trampoline of livepatch brk handler.
+ *
+ * brk -> traps
+ *          - klp_brk_handler
+ *            - set R11 to new_func address
+ *            - set NIP to livepatch_brk_trampoline address
+ * see arch/powerpc/kernel/livepatch.c
+ */
+_GLOBAL(livepatch_brk_trampoline)
+	mflr	r0
+	std	r0, 16(r1)
+	std	r2, 24(r1)
+	stdu	r1, -STACK_FRAME_MIN_SIZE(r1)
+
+	/* Call NEW_FUNC */
+	ld	r12, 0(r11)	/* load new func address to R12 */
+	ld	r2, 8(r11)	/* set up new R2 */
+	mtctr	r12		/* load R12(new func address) to CTR */
+	bctrl			/* call new func */
+
+	/*
+	 * Now we are returning from the patched function to the original
+	 * caller A. We are free to use r11, r12 and we can use r2 until we
+	 * restore it.
+	 */
+	addi	r1, r1, STACK_FRAME_MIN_SIZE
+	ld	r2, 24(r1)
+	ld	r0, 16(r1)
+	mtlr	r0
+
+	/* Return to original caller of live patched function */
+	blr
diff --git a/arch/powerpc/kernel/traps.c b/arch/powerpc/kernel/traps.c
index fe3f720c9cd6..057f6167199b 100644
--- a/arch/powerpc/kernel/traps.c
+++ b/arch/powerpc/kernel/traps.c
@@ -70,6 +70,10 @@
 #include <asm/disassemble.h>
 #include <asm/udbg.h>
 
+#ifdef CONFIG_LIVEPATCH_WO_FTRACE
+#include <asm/livepatch.h>
+#endif
+
 #if defined(CONFIG_DEBUGGER) || defined(CONFIG_KEXEC_CORE)
 int (*__debugger)(struct pt_regs *regs) __read_mostly;
 int (*__debugger_ipi)(struct pt_regs *regs) __read_mostly;
@@ -1496,6 +1500,10 @@ static void do_program_check(struct pt_regs *regs)
		if (kprobe_handler(regs))
			return;
 
+#ifdef CONFIG_LIVEPATCH_WO_FTRACE
+		if (klp_brk_handler(regs))
+			return;
+#endif
		/* trap exception */
		if (notify_die(DIE_BPT, "breakpoint", regs, 5, 5,
			       SIGTRAP) == NOTIFY_STOP)