hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I8MGE6
---------------------------
Initially complete livepatch support for ppc64 big endian (ppc64be); the call from the old function to the new function goes through a stub.
When we branch from the old function to the new function, R2 (the TOC pointer) has to be restored after the new function returns. Module relocation handles this by reserving an extra nop slot after the call (bxxx) instruction and patching it to restore R2, but that approach cannot be used here: we never return to the old function after calling the new one, so a trampoline is needed instead.
The trampoline calls the new function and restores R2 once it returns. Note that the old function itself could be used as the trampoline, but the old function often does not have enough space to hold the trampoline instruction sequence.
The trampoline code itself is global, but every patched function needs its own trampoline instance, and the stack check has to be extended to cover it.
Our call chain to the new function looks like this:
CALLER
    old_func
        -=> trampoline
            -=> new_func
So we cannot simply check new_func: old_func, the trampoline and new_func may all appear on the stack.
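
For illustration only (not part of this patch): a minimal userspace sketch that mirrors struct ppc64_klp_bstub_entry added below and checks the layout assumption behind the stub's "ld r12,24(r11)" instruction, i.e. that the trampoline address sits at byte offset 24 of the stub entry.

  #include <stddef.h>
  #include <stdint.h>

  /* Mirror of struct ppc64_klp_bstub_entry from this patch: five stub
   * instructions (addis/addi/ld/mtctr/bctr) patched over the start of
   * old_func, a magic word, then the trampoline address. */
  struct ppc64_klp_bstub_entry {
          uint32_t jump[5];
          uint32_t magic;
          uint64_t trampoline;
  };

  int main(void)
  {
          /* jump[5] is 20 bytes and magic is 4, and the following u64 is
           * naturally 8-byte aligned, so 'trampoline' lands at offset 24,
           * matching the "ld r12,24(r11)" in livepatch_branch_stub. */
          _Static_assert(offsetof(struct ppc64_klp_bstub_entry, trampoline) == 24,
                         "stub load offset must match the entry layout");
          return 0;
  }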
Signed-off-by: Cheng Jian <cj.chengjian@huawei.com>
Signed-off-by: Dong Kai <dongkai11@huawei.com>
Signed-off-by: Ye Weihua <yeweihua4@huawei.com>
Signed-off-by: Zheng Yejian <zhengyejian1@huawei.com>
---
 arch/powerpc/Kconfig                  |   2 +-
 arch/powerpc/include/asm/livepatch.h  |  50 ++++-
 arch/powerpc/include/asm/module.h     |   3 +
 arch/powerpc/kernel/Makefile          |   1 +
 arch/powerpc/kernel/livepatch_64.c    | 264 ++++++++++++++++++++++++++
 arch/powerpc/kernel/livepatch_tramp.S |  92 +++++++++
 arch/powerpc/kernel/module_64.c       | 109 +++++++++++
 include/linux/livepatch.h             |   5 +
 kernel/livepatch/core.c               |  28 +++
 9 files changed, 551 insertions(+), 3 deletions(-)
 create mode 100644 arch/powerpc/kernel/livepatch_64.c
 create mode 100644 arch/powerpc/kernel/livepatch_tramp.S
diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 59eaab656ab4..64b312a9384d 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -257,7 +257,7 @@ config PPC select HAVE_KRETPROBES select HAVE_LD_DEAD_CODE_DATA_ELIMINATION if HAVE_OBJTOOL_MCOUNT && (!ARCH_USING_PATCHABLE_FUNCTION_ENTRY || (!CC_IS_GCC || GCC_VERSION >= 110100)) select HAVE_LIVEPATCH_FTRACE if HAVE_DYNAMIC_FTRACE_WITH_REGS - select HAVE_LIVEPATCH_WO_FTRACE if PPC32 + select HAVE_LIVEPATCH_WO_FTRACE if (PPC64 && CPU_BIG_ENDIAN && PPC64_ELF_ABI_V1) || PPC32 select HAVE_MOD_ARCH_SPECIFIC select HAVE_NMI if PERF_EVENTS || (PPC64 && PPC_BOOK3S) select HAVE_OPTPROBES diff --git a/arch/powerpc/include/asm/livepatch.h b/arch/powerpc/include/asm/livepatch.h index aa36c6e09110..3b2d089eb180 100644 --- a/arch/powerpc/include/asm/livepatch.h +++ b/arch/powerpc/include/asm/livepatch.h @@ -23,16 +23,62 @@ static inline void klp_init_thread_info(struct task_struct *p) { }
#ifdef CONFIG_LIVEPATCH_WO_FTRACE
+struct klp_func; + +#ifdef CONFIG_PPC64 +/* + * use the livepatch stub to jump to the trampoline. + * It is similar to stub, but does not need to save + * and load R2. + * struct ppc64_klp_bstub_entry + */ +struct ppc64_klp_bstub_entry { + u32 jump[5]; + u32 magic; + /* address for livepatch trampoline */ + u64 trampoline; +}; + +struct ppc64_klp_btramp_entry { + u32 jump[18]; + u32 magic; + union { + func_desc_t funcdata; + unsigned long saved_entry[3]; + }; +}; + +#define PPC64_INSN_SIZE 4 +#define LJMP_INSN_SIZE (sizeof(struct ppc64_klp_bstub_entry) / PPC64_INSN_SIZE) + +/* STUB_MAGIC 0x73747562 "stub" */ +#define BRANCH_STUB_MAGIC 0x73747563 /* stub + 1 */ +#define BRANCH_TRAMPOLINE_MAGIC 0x73747564 /* stub + 2 */ +void livepatch_branch_stub(void); +void livepatch_branch_stub_end(void); +void livepatch_branch_trampoline(void); +void livepatch_branch_trampoline_end(void); + +int livepatch_create_branch(unsigned long pc, unsigned long trampoline, + unsigned long addr, struct module *me); +struct klp_object; +int arch_klp_init_func(struct klp_object *obj, struct klp_func *func); +void *arch_klp_mem_alloc(size_t size); +void arch_klp_mem_free(void *mem); +#else /* !CONFIG_PPC64 */ #define PPC32_INSN_SIZE 4 #define LJMP_INSN_SIZE 4 +#endif /* CONFIG_PPC64 */ + struct arch_klp_data { u32 old_insns[LJMP_INSN_SIZE]; +#ifdef CONFIG_PPC64 + struct ppc64_klp_btramp_entry trampoline; +#endif };
#define KLP_MAX_REPLACE_SIZE sizeof_field(struct arch_klp_data, old_insns)
-struct klp_func; - /* kernel livepatch instruction barrier */ #define klp_smp_isb() __smp_lwsync() int arch_klp_patch_func(struct klp_func *func); diff --git a/arch/powerpc/include/asm/module.h b/arch/powerpc/include/asm/module.h index a8e2e8339fb7..963352a9b2f4 100644 --- a/arch/powerpc/include/asm/module.h +++ b/arch/powerpc/include/asm/module.h @@ -38,6 +38,9 @@ struct mod_arch_specific { /* For module function descriptor dereference */ unsigned long start_opd; unsigned long end_opd; +#ifdef CONFIG_LIVEPATCH_WO_FTRACE + unsigned long toc; +#endif #else /* powerpc64 */ /* Indices of PLT sections within module. */ unsigned int core_plt_section; diff --git a/arch/powerpc/kernel/Makefile b/arch/powerpc/kernel/Makefile index ad7f7183cae0..2a66f1dd810a 100644 --- a/arch/powerpc/kernel/Makefile +++ b/arch/powerpc/kernel/Makefile @@ -211,6 +211,7 @@ obj-$(CONFIG_ALTIVEC) += vector.o obj-$(CONFIG_PPC_OF_BOOT_TRAMPOLINE) += prom_init.o obj64-$(CONFIG_PPC_OF_BOOT_TRAMPOLINE) += prom_entry_64.o extra-$(CONFIG_PPC_OF_BOOT_TRAMPOLINE) += prom_init_check +obj64-$(CONFIG_LIVEPATCH_WO_FTRACE) += livepatch_tramp.o
obj-$(CONFIG_PPC64) += $(obj64-y) obj-$(CONFIG_PPC32) += $(obj32-y) diff --git a/arch/powerpc/kernel/livepatch_64.c b/arch/powerpc/kernel/livepatch_64.c new file mode 100644 index 000000000000..c286da982558 --- /dev/null +++ b/arch/powerpc/kernel/livepatch_64.c @@ -0,0 +1,264 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * livepatch.c - powerpc-specific Kernel Live Patching Core + * + * Copyright (C) 2018 Huawei Technologies Co., Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see http://www.gnu.org/licenses/. + */ + +#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt + +#include <linux/module.h> +#include <linux/moduleloader.h> +#include <linux/uaccess.h> +#include <linux/livepatch.h> +#include <linux/slab.h> +#include <linux/sizes.h> +#include <linux/kallsyms.h> +#include <asm/livepatch.h> +#include <asm/cacheflush.h> +#include <asm/code-patching.h> +#include <asm/elf.h> + +int arch_klp_check_activeness_func(struct klp_func *func, int enable, + klp_add_func_t add_func, struct list_head *func_list) +{ + int ret; + unsigned long func_addr, func_size; + struct klp_func_node *func_node = NULL; + + func_node = func->func_node; + /* Check func address in stack */ + if (enable) { + if (func->patched || func->force == KLP_ENFORCEMENT) + return 0; + /* + * When enable, checking the currently + * active functions. + */ + if (list_empty(&func_node->func_stack)) { + /* + * No patched on this function + * [ the origin one ] + */ + func_addr = (unsigned long)func->old_func; + func_size = func->old_size; + } else { + /* + * Previously patched function + * [ the active one ] + */ + struct klp_func *prev; + + prev = list_first_or_null_rcu(&func_node->func_stack, + struct klp_func, stack_node); + func_addr = ppc_function_entry((void *)prev->new_func); + func_size = prev->new_size; + } + /* + * When preemption is disabled and the + * replacement area does not contain a jump + * instruction, the migration thread is + * scheduled to run stop machine only after the + * excution of instructions to be repalced is + * complete. + */ + if (IS_ENABLED(CONFIG_PREEMPTION) || + IS_ENABLED(CONFIG_LIVEPATCH_BREAKPOINT_NO_STOP_MACHINE) || + (func->force == KLP_NORMAL_FORCE) || + arch_check_jump_insn(func_addr)) { + ret = add_func(func_list, func_addr, func_size, + func->old_name, func->force); + if (ret) + return ret; + } + } else { + /* + * When disable, check for the function itself + * which to be unpatched. + */ + func_addr = ppc_function_entry((void *)func->new_func); + func_size = func->new_size; + ret = add_func(func_list, func_addr, + func_size, func->old_name, 0); + if (ret) + return ret; + } + + /* + * Check trampoline in stack + * new_func callchain: + * old_func + * -=> trampoline + * -=> new_func + * so, we should check all the func in the callchain + */ + if (func_addr != (unsigned long)func->old_func) { +#ifdef CONFIG_PREEMPTION + /* + * No scheduling point in the replacement + * instructions. 
Therefore, when preemption is + * not enabled, atomic execution is performed + * and these instructions will not appear on + * the stack. + */ + func_addr = (unsigned long)func->old_func; + func_size = func->old_size; + ret = add_func(func_list, func_addr, + func_size, "OLD_FUNC", 0); + if (ret) + return ret; +#endif /* CONFIG_PREEMPTION */ + + if (func_node->arch_data.trampoline.magic != BRANCH_TRAMPOLINE_MAGIC) + return 0; + + func_addr = (unsigned long)&func_node->arch_data.trampoline; + func_size = sizeof(struct ppc64_klp_btramp_entry); + ret = add_func(func_list, func_addr, + func_size, "trampoline", 0); + if (ret) + return ret; + } + return 0; +} + +long arch_klp_save_old_code(struct arch_klp_data *arch_data, void *old_func) +{ + long ret; + int i; + + for (i = 0; i < LJMP_INSN_SIZE; i++) { + ret = copy_from_kernel_nofault(&arch_data->old_insns[i], + ((u32 *)old_func) + i, PPC64_INSN_SIZE); + if (ret) + break; + } + return ret; +} + +static int do_patch(unsigned long pc, unsigned long new_addr, + struct arch_klp_data *arch_data, struct module *old_mod) +{ + int ret; + + ret = livepatch_create_branch(pc, (unsigned long)&arch_data->trampoline, + new_addr, old_mod); + if (ret) { + pr_err("create branch failed, ret=%d\n", ret); + return -EPERM; + } + flush_icache_range(pc, pc + LJMP_INSN_SIZE * PPC64_INSN_SIZE); + pr_debug("[%s %d] old = 0x%lx/0x%lx/%pS, new = 0x%lx/0x%lx/%pS\n", + __func__, __LINE__, + pc, ppc_function_entry((void *)pc), (void *)pc, + new_addr, ppc_function_entry((void *)new_addr), + (void *)ppc_function_entry((void *)new_addr)); + return 0; +} + +int arch_klp_patch_func(struct klp_func *func) +{ + struct klp_func_node *func_node; + int ret; + + func_node = func->func_node; + list_add_rcu(&func->stack_node, &func_node->func_stack); + ret = do_patch((unsigned long)func->old_func, + (unsigned long)func->new_func, + &func_node->arch_data, func->old_mod); + if (ret) + list_del_rcu(&func->stack_node); + return ret; +} + +void arch_klp_unpatch_func(struct klp_func *func) +{ + struct klp_func_node *func_node; + struct klp_func *next_func; + unsigned long pc; + int ret; + + func_node = func->func_node; + pc = (unsigned long)func_node->old_func; + list_del_rcu(&func->stack_node); + if (list_empty(&func_node->func_stack)) { + ret = klp_patch_text((u32 *)pc, func_node->arch_data.old_insns, + LJMP_INSN_SIZE); + if (ret) { + pr_err("restore instruction failed, ret=%d\n", ret); + return; + } + + pr_debug("[%s %d] restore insns at 0x%lx\n", __func__, __LINE__, pc); + flush_icache_range(pc, pc + LJMP_INSN_SIZE * PPC64_INSN_SIZE); + } else { + next_func = list_first_or_null_rcu(&func_node->func_stack, + struct klp_func, stack_node); + do_patch(pc, (unsigned long)next_func->new_func, + &func_node->arch_data, func->old_mod); + } +} + +int arch_klp_init_func(struct klp_object *obj, struct klp_func *func) +{ + unsigned long new_addr = (unsigned long)func->new_func; + + /* + * ABI v1 address is address of the OPD entry, + * which contains address of fn. ABI v2 An address + * is simply the address of the function. + * + * The function descriptor is in the data section. So + * If new_addr is in the code segment, we think it is + * a function address, if addr isn't in the code segment, + * we consider it to be a function descriptor. 
+ */ + if (!is_module_text_address(new_addr)) { + new_addr = (unsigned long)ppc_function_entry((void *)new_addr); + if (!kallsyms_lookup_size_offset((unsigned long)new_addr, + &func->new_size, NULL)) + return -ENOENT; + } + + func->this_mod = __module_text_address(new_addr); + if (!func->this_mod) + return -EINVAL; + + func->new_func_descr.addr = new_addr; + func->new_func_descr.toc = func->this_mod->arch.toc; + func->new_func_descr.env = 0; + func->new_func = (void *)&func->new_func_descr; + + return 0; +} + +/* + * Trampoline would be stored in the allocated memory and it need + * executable permission, so ppc64 use 'module_alloc' but not 'kmalloc'. + */ +void *arch_klp_mem_alloc(size_t size) +{ + void *mem = module_alloc(size); + + if (mem) + memset(mem, 0, size); /* initially clear the memory */ + return mem; +} + +void arch_klp_mem_free(void *mem) +{ + module_memfree(mem); +} diff --git a/arch/powerpc/kernel/livepatch_tramp.S b/arch/powerpc/kernel/livepatch_tramp.S new file mode 100644 index 000000000000..d8baf8a98a97 --- /dev/null +++ b/arch/powerpc/kernel/livepatch_tramp.S @@ -0,0 +1,92 @@ +/* SPDX-License-Identifier: GPL-2.0-or-later */ +/* + * livepatch.c - powerpc-specific Kernel Live Patching Core + * + * Copyright (C) 2023 Huawei Technologies Co., Ltd. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version 2 + * of the License, or (at your option) any later version. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program; if not, see http://www.gnu.org/licenses/. + */ + +#include <asm/asm-offsets.h> +#include <asm/ppc_asm.h> +#include <asm/ptrace.h> + +.section ".text","ax",@progbits + + /* + * Livepatch function branch stub. + * see struct ppc64_klp_bstub_entry + * use it jump to livepatch trampoline + */ +_GLOBAL(livepatch_branch_stub) + addis r11,r2, 0 /* <high> */ + addi r11,r11, 0 /* <low> */ + ld r12,24(r11) + mtctr r12 + bctr +_GLOBAL(livepatch_branch_stub_end) + nop /* for magic */ + + /* + * This function runs in the livepatch context, between two functions. + * As such it can only clobber registers which are volatile and used in + * function linkage. + * + * We get here when a function A, calls another function B, but B has + * been live patched with a new function C. 
+ * + * On entry: + * - we have no stack frame and can not allocate one + * - LR points back to the original caller (in A) + * - CTR used to hold the new NIP for call + * - r0, r11 & r12 are free + * -- r11 point back to the bstub data which store (func descr) + * ---- 0(saved_entry) : new function address + * ---- 8(r0) : new R2(toc) for new function + * -- tag livepatch stack with r11 + * -- save temporary variables with r12 + */ +_GLOBAL(livepatch_branch_trampoline) + mflr r0 + std r0, 16(r1) + std r2, 24(r1) + stdu r1, -STACK_FRAME_MIN_SIZE(r1) + + /* Load func descr address to R11 */ + lis r11, 0 /* saved_entry@highest */ + ori r11,r11,0 /* saved_entry@higher */ + rldicr r11,r11,32,31 + oris r11,r11,0 /* saved_entry@high */ + ori r11,r11,0 /* saved_entry@low */ + + /* Call NEW_FUNC */ + ld r12, 0(r11) /* load new func address to R12 */ + ld r2, 8(r11) /* set up new R2 */ + mtctr r12 /* load R12(new func address) to CTR */ + bctrl /* call new func */ + + /* + * Now we are returning from the patched function to the original + * caller A. We are free to use r11, r12 and we can use r2 until we + * restore it. + */ + addi r1, r1, STACK_FRAME_MIN_SIZE + ld r2, 24(r1) + ld r0, 16(r1) + mtlr r0 + + /* Return to original caller of live patched function */ + blr +_GLOBAL(livepatch_branch_trampoline_end) + nop diff --git a/arch/powerpc/kernel/module_64.c b/arch/powerpc/kernel/module_64.c index 7112adc597a8..a1715d70d348 100644 --- a/arch/powerpc/kernel/module_64.c +++ b/arch/powerpc/kernel/module_64.c @@ -1046,6 +1046,9 @@ int apply_relocate_add(Elf64_Shdr *sechdrs, } }
+#ifdef CONFIG_LIVEPATCH_WO_FTRACE + me->arch.toc = my_r2(sechdrs, me); +#endif return 0; }
@@ -1107,3 +1110,109 @@ int module_finalize_ftrace(struct module *mod, const Elf_Shdr *sechdrs) return 0; } #endif + +#ifdef CONFIG_LIVEPATCH_WO_FTRACE +#include <asm/livepatch.h> +#include <asm/cacheflush.h> + +#define PPC_LIVEPATCH_BITMASK(v, n) (((v) >> (n)) & 0xffff) +#define PPC_LIVEPATCH_HIGHEST(v) PPC_LIVEPATCH_BITMASK(v, 48) +#define PPC_LIVEPATCH_HIGHER(v) PPC_LIVEPATCH_BITMASK(v, 32) +#define PPC_LIVEPATCH_HIGH(v) PPC_LIVEPATCH_BITMASK(v, 16) +#define PPC_LIVEPATCH_LOW(v) PPC_LIVEPATCH_BITMASK(v, 0) + +/* + * Patch jump stub to reference trampoline + * without saved the old R2 and load the new R2. + */ +static int livepatch_create_bstub(void *pc, unsigned long addr, struct module *me) +{ + long reladdr; + unsigned long my_r2; + unsigned long stub_start, stub_end, stub_size; + struct ppc64_klp_bstub_entry entry; + + /* Stub uses address relative to r2. */ + my_r2 = me ? me->arch.toc : kernel_toc_addr(); + reladdr = (unsigned long)pc - my_r2; + if (reladdr > 0x7FFFFFFF || reladdr < -(0x80000000L)) { + pr_err("%s: Address %p of jump stub out of range of %p.\n", + me ? me->name : "kernel", + (void *)reladdr, (void *)my_r2); + return 0; + } + + + stub_start = ppc_function_entry((void *)livepatch_branch_stub); + stub_end = ppc_function_entry((void *)livepatch_branch_stub_end); + stub_size = stub_end - stub_start; + memcpy(entry.jump, (u32 *)stub_start, stub_size); + + entry.jump[0] |= PPC_HA(reladdr); + entry.jump[1] |= PPC_LO(reladdr); + entry.magic = BRANCH_STUB_MAGIC; + entry.trampoline = addr; + + + /* skip breakpoint at first */ + memcpy(pc + PPC64_INSN_SIZE, (void *)&entry + PPC64_INSN_SIZE, + sizeof(entry) - PPC64_INSN_SIZE); + /* + * Avoid compile optimization, make sure that instructions + * except first breakpoint has been patched. + */ + barrier(); + memcpy(pc, (void *)&entry, PPC64_INSN_SIZE); + pr_debug("Create livepatch branch stub 0x%p with reladdr 0x%lx r2 0x%lx to trampoline 0x%lx\n", + pc, reladdr, my_r2, addr); + + return 1; +} + +static void livepatch_create_btramp(struct ppc64_klp_btramp_entry *entry, + unsigned long addr) +{ + unsigned long reladdr, tramp_start, tramp_end, tramp_size; + + tramp_start = ppc_function_entry((void *)livepatch_branch_trampoline); + tramp_end = ppc_function_entry((void *)livepatch_branch_trampoline_end); + tramp_size = tramp_end - tramp_start; + + if (entry->magic != BRANCH_TRAMPOLINE_MAGIC) { + reladdr = (unsigned long)entry->saved_entry; + + memcpy(entry->jump, (u32 *)tramp_start, tramp_size); + + entry->jump[4] |= PPC_LIVEPATCH_HIGHEST(reladdr); + entry->jump[5] |= PPC_LIVEPATCH_HIGHER(reladdr); + entry->jump[7] |= PPC_LIVEPATCH_HIGH(reladdr); + entry->jump[8] |= PPC_LIVEPATCH_LOW(reladdr); + + entry->magic = BRANCH_TRAMPOLINE_MAGIC; + } + entry->funcdata = func_desc(addr); + + flush_icache_range((unsigned long)entry, (unsigned long)entry + tramp_size); + + pr_debug("Create livepatch trampoline 0x%p+%lu/0x%lx to 0x%lx/0x%lx/%pS\n", + (void *)entry, tramp_size, (unsigned long)entry->saved_entry, + addr, ppc_function_entry((void *)addr), + (void *)ppc_function_entry((void *)addr)); +} + +int livepatch_create_branch(unsigned long pc, + unsigned long trampoline, + unsigned long addr, + struct module *me) +{ + + /* Create trampoline to addr(new func) */ + livepatch_create_btramp((struct ppc64_klp_btramp_entry *)trampoline, addr); + + /* Create stub to trampoline */ + if (!livepatch_create_bstub((void *)pc, trampoline, me)) + return -EINVAL; + + return 0; +} +#endif diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h index 
9e0915201268..f04f96b99d50 100644 --- a/include/linux/livepatch.h +++ b/include/linux/livepatch.h @@ -83,6 +83,11 @@ struct klp_func { bool patched; #ifdef CONFIG_LIVEPATCH_FTRACE bool transition; +#endif +#if defined(CONFIG_LIVEPATCH_WO_FTRACE) && defined(CONFIG_PPC64) + struct module *old_mod; + struct module *this_mod; + struct func_desc new_func_descr; #endif void *func_node; /* Only used in the solution without ftrace */ }; diff --git a/kernel/livepatch/core.c b/kernel/livepatch/core.c index 2abf8db039d0..5fcf5712de3b 100644 --- a/kernel/livepatch/core.c +++ b/kernel/livepatch/core.c @@ -93,6 +93,11 @@ static void klp_find_object_module(struct klp_object *obj) } #else /* !CONFIG_LIVEPATCH_FTRACE */ static int klp_find_object_module(struct klp_object *obj); + +int __weak arch_klp_init_func(struct klp_object *obj, struct klp_func *func) +{ + return 0; +} #endif /* CONFIG_LIVEPATCH_FTRACE */
static bool klp_initialized(void) @@ -850,6 +855,9 @@ void klp_free_replaced_patches_async(struct klp_patch *new_patch)
static int klp_init_func(struct klp_object *obj, struct klp_func *func) { +#ifndef CONFIG_LIVEPATCH_FTRACE + int ret; +#endif if (!func->old_name) return -EINVAL;
@@ -872,6 +880,16 @@ static int klp_init_func(struct klp_object *obj, struct klp_func *func) func->patched = false; #ifdef CONFIG_LIVEPATCH_FTRACE func->transition = false; +#else +#ifdef CONFIG_PPC64 + if (klp_is_module(obj)) + func->old_mod = obj->mod; + else + func->old_mod = NULL; +#endif + ret = arch_klp_init_func(obj, func); + if (ret) + return ret; #endif
/* The format for the sysfs directory is <function,sympos> where sympos @@ -952,6 +970,16 @@ static int klp_init_object_loaded(struct klp_patch *patch, if (ret) return ret;
+#ifdef CONFIG_PPC64 + /* + * PPC64 big endian uses the ELFv1 ABI by default, so the actual + * symbol name of the old function needs a '.' prefix (because of + * function descriptors); otherwise the size found by + * 'kallsyms_lookup_size_offset' may be wrong. + */ + if (func->old_name[0] != '.') + pr_warn("old_name '%s' may miss the prefix '.'\n", func->old_name); +#endif ret = kallsyms_lookup_size_offset((unsigned long)func->old_func, &func->old_size, NULL); #ifdef CONFIG_LIVEPATCH_FTRACE