From: Ye Weihua <yeweihua4@huawei.com>
hulk inclusion
category: feature
bugzilla: 119440 https://gitee.com/openeuler/kernel/issues/I4DDEL
--------------------------------
Enable the stack-check optimization on ppc32.

When a function is patched with force type KLP_STACK_OPTIMIZE and none of
its first CHECK_JUMP_RANGE instructions is a jump (BL/BLA, BCL/BCLA, BCCTRL
or BCLRL), no sleeping task can hold a return address inside the range
being replaced, so it is enough to compare each task's saved PC against the
function entry. Such functions are collected on a separate "no jump" list,
and the full stack walk through klp_walk_stackframe() is only performed
when the list of remaining functions is non-empty.
Signed-off-by: Ye Weihua <yeweihua4@huawei.com>
Reviewed-by: Yang Jihong <yangjihong1@huawei.com>
Signed-off-by: Chen Jun <chenjun102@huawei.com>
---
 arch/powerpc/kernel/livepatch_32.c | 191 +++++++++++++++++++++++++----
 1 file changed, 166 insertions(+), 25 deletions(-)
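For review convenience, a minimal user-space sketch of the branch test this
patch adds. The mask/value pairs mirror is_jump_insn() from the diff below;
the encodings exercised in main() are illustrative assumptions, not taken
from the patch:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/*
 * Same mask/value test as the kernel side: BL/BLA and BCL/BCLA are
 * matched on primary opcode plus LK bit; BCCTRL and BCLRL need the
 * full extended-opcode field, hence the wider 0xfc00ffff mask.
 */
static bool is_jump_insn(uint32_t insn)
{
	uint32_t tmp1 = insn & 0xfc000001;
	uint32_t tmp2 = insn & 0xfc00ffff;

	return tmp1 == 0x48000001 || tmp1 == 0x40000001 ||
	       tmp2 == 0x4c000421 || tmp2 == 0x4c000021;
}

int main(void)
{
	printf("bl .+0x100 -> %d\n", is_jump_insn(0x48000101)); /* 1: BL, LK=1 */
	printf("b  .+0x100 -> %d\n", is_jump_insn(0x48000100)); /* 0: LK=0 */
	printf("blrl       -> %d\n", is_jump_insn(0x4e800021)); /* 1: BCLRL form */
	printf("blr        -> %d\n", is_jump_insn(0x4e800020)); /* 0: LK=0 */
	return 0;
}

Only link-register-setting branches matter here: a plain branch inside the
checked range cannot leave a return address pointing back into it.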
diff --git a/arch/powerpc/kernel/livepatch_32.c b/arch/powerpc/kernel/livepatch_32.c
index db6dbe091281..d22c44edc7c7 100644
--- a/arch/powerpc/kernel/livepatch_32.c
+++ b/arch/powerpc/kernel/livepatch_32.c
@@ -31,14 +31,15 @@
 #if defined (CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY) || \
 	defined (CONFIG_LIVEPATCH_WO_FTRACE)
-#define LJMP_INSN_SIZE 4
+#define LJMP_INSN_SIZE	4
 #define MAX_SIZE_TO_CHECK (LJMP_INSN_SIZE * sizeof(u32))
+#define CHECK_JUMP_RANGE LJMP_INSN_SIZE
 
 struct klp_func_node {
 	struct list_head node;
 	struct list_head func_stack;
 	void *old_func;
-	u32 old_insns[LJMP_INSN_SIZE];
+	u32	old_insns[LJMP_INSN_SIZE];
 };
 
 static LIST_HEAD(klp_func_list);
 
@@ -57,14 +58,40 @@ static struct klp_func_node *klp_find_func_node(void *old_func)
 #endif
 
 #ifdef CONFIG_LIVEPATCH_STOP_MACHINE_CONSISTENCY
+/*
+ * The instruction set on ppc32 is RISC.
+ * The instructions of BL and BLA are 010010xxxxxxxxxxxxxxxxxxxxxxxxx1.
+ * The instructions of BCL and BCLA are 010000xxxxxxxxxxxxxxxxxxxxxxxxx1.
+ * The instruction of BCCTRL is 010011xxxxxxxxxx0000010000100001.
+ * The instruction of BCLRL is 010011xxxxxxxxxx0000000000100001.
+ */
+static bool is_jump_insn(u32 insn)
+{
+	u32 tmp1 = (insn & 0xfc000001);
+	u32 tmp2 = (insn & 0xfc00ffff);
+
+	if ((tmp1 == 0x48000001) || (tmp1 == 0x40000001) ||
+	    (tmp2 == 0x4c000421) || (tmp2 == 0x4c000021))
+		return true;
+	return false;
+}
+
+struct klp_func_list {
+	struct klp_func_list *next;
+	unsigned long func_addr;
+	unsigned long func_size;
+	const char *func_name;
+	int force;
+};
+
 struct stackframe {
 	unsigned long sp;
 	unsigned long pc;
 };
 
 struct walk_stackframe_args {
-	struct klp_patch *patch;
 	int enable;
+	struct klp_func_list *other_funcs;
 	int ret;
 };
@@ -88,22 +115,59 @@ static inline int klp_compare_address(unsigned long pc, unsigned long func_addr,
 	return 0;
 }
 
-static int klp_check_activeness_func(struct stackframe *frame, void *data)
+static bool check_jump_insn(unsigned long func_addr)
 {
-	struct walk_stackframe_args *args = data;
-	struct klp_patch *patch = args->patch;
+	unsigned long i;
+	u32 *insn = (u32*)func_addr;
+
+	for (i = 0; i < CHECK_JUMP_RANGE; i++) {
+		if (is_jump_insn(*insn)) {
+			return true;
+		}
+		insn++;
+	}
+	return false;
+}
+
+static int add_func_to_list(struct klp_func_list **funcs, struct klp_func_list **func,
+		unsigned long func_addr, unsigned long func_size, const char *func_name,
+		int force)
+{
+	if (*func == NULL) {
+		*funcs = (struct klp_func_list*)kzalloc(sizeof(**funcs), GFP_ATOMIC);
+		if (!(*funcs))
+			return -ENOMEM;
+		*func = *funcs;
+	} else {
+		(*func)->next = (struct klp_func_list*)kzalloc(sizeof(**funcs),
+				GFP_ATOMIC);
+		if (!(*func)->next)
+			return -ENOMEM;
+		*func = (*func)->next;
+	}
+	(*func)->func_addr = func_addr;
+	(*func)->func_size = func_size;
+	(*func)->func_name = func_name;
+	(*func)->force = force;
+	(*func)->next = NULL;
+	return 0;
+}
+
+static int klp_check_activeness_func(struct klp_patch *patch, int enable,
+		struct klp_func_list **nojump_funcs,
+		struct klp_func_list **other_funcs)
+{
+	int ret;
 	struct klp_object *obj;
 	struct klp_func *func;
 	unsigned long func_addr, func_size;
-	const char *func_name;
 	struct klp_func_node *func_node;
-
-	if (args->ret)
-		return args->ret;
+	struct klp_func_list *pnjump = NULL;
+	struct klp_func_list *pother = NULL;
 
 	for (obj = patch->objs; obj->funcs; obj++) {
 		for (func = obj->funcs; func->old_name; func++) {
-			if (args->enable) {
+			if (enable) {
 				if (func->force == KLP_ENFORCEMENT)
 					continue;
 				/*
@@ -132,23 +196,52 @@ static int klp_check_activeness_func(struct stackframe *frame, void *data)
 					func_addr = (unsigned long)prev->new_func;
 					func_size = prev->new_size;
 				}
+				if ((func->force == KLP_STACK_OPTIMIZE) &&
+					!check_jump_insn(func_addr))
+					ret = add_func_to_list(nojump_funcs, &pnjump,
+							func_addr, func_size,
+							func->old_name, func->force);
+				else
+					ret = add_func_to_list(other_funcs, &pother,
+							func_addr, func_size,
+							func->old_name, func->force);
+				if (ret)
+					return ret;
 			} else {
 				/*
-				 * When disable, check for the function itself
+				 * When disable, check for the previously
+				 * patched function and the function itself
 				 * which to be unpatched.
 				 */
+				func_node = klp_find_func_node(func->old_func);
+				if (!func_node)
+					return -EINVAL;
+				if (list_is_singular(&func_node->func_stack)) {
+					func_addr = (unsigned long)func->old_func;
+					func_size = func->old_size;
+				} else {
+					struct klp_func *prev;
+
+					prev = list_first_or_null_rcu(
+						&func_node->func_stack,
+						struct klp_func, stack_node);
+					func_addr = (unsigned long)prev->new_func;
+					func_size = prev->new_size;
+				}
+				ret = add_func_to_list(other_funcs, &pother, func_addr,
+						func_size, func->old_name, 0);
+				if (ret)
+					return ret;
 				func_addr = (unsigned long)func->new_func;
 				func_size = func->new_size;
+				ret = add_func_to_list(other_funcs, &pother, func_addr,
+						func_size, func->old_name, 0);
+				if (ret)
+					return ret;
 			}
-			func_name = func->old_name;
-			args->ret = klp_compare_address(frame->pc, func_addr, func_name,
-					klp_size_to_check(func_size, func->force));
-			if (args->ret)
-				return args->ret;
 		}
 	}
-
-	return args->ret;
+	return 0;
 }
 
 static int unwind_frame(struct task_struct *tsk, struct stackframe *frame)
@@ -180,16 +273,56 @@ void notrace klp_walk_stackframe(struct stackframe *frame,
 	}
 }
 
+static bool check_func_list(struct klp_func_list *funcs, int *ret, unsigned long pc)
+{
+	while (funcs != NULL) {
+		*ret = klp_compare_address(pc, funcs->func_addr, funcs->func_name,
+				klp_size_to_check(funcs->func_size, funcs->force));
+		if (*ret) {
+			return false;
+		}
+		funcs = funcs->next;
+	}
+	return true;
+}
+
+static int klp_check_jump_func(struct stackframe *frame, void *data)
+{
+	struct walk_stackframe_args *args = data;
+	struct klp_func_list *other_funcs = args->other_funcs;
+
+	if (!check_func_list(other_funcs, &args->ret, frame->pc)) {
+		return args->ret;
+	}
+	return 0;
+}
+
+static void free_list(struct klp_func_list **funcs)
+{
+	struct klp_func_list *p;
+
+	while (*funcs != NULL) {
+		p = *funcs;
+		*funcs = (*funcs)->next;
+		kfree(p);
+	}
+}
+
 int klp_check_calltrace(struct klp_patch *patch, int enable)
 {
 	struct task_struct *g, *t;
 	struct stackframe frame;
 	unsigned long *stack;
 	int ret = 0;
+	struct klp_func_list *nojump_funcs = NULL;
+	struct klp_func_list *other_funcs = NULL;
+
+	ret = klp_check_activeness_func(patch, enable, &nojump_funcs, &other_funcs);
+	if (ret)
+		goto out;
 
 	struct walk_stackframe_args args = {
-		.patch = patch,
-		.enable = enable,
+		.other_funcs = other_funcs,
 		.ret = 0
 	};
@@ -230,17 +363,25 @@ int klp_check_calltrace(struct klp_patch *patch, int enable)
 		frame.sp = (unsigned long)stack;
 		frame.pc = stack[STACK_FRAME_LR_SAVE];
-		klp_walk_stackframe(&frame, klp_check_activeness_func,
-				t, &args);
-		if (args.ret) {
-			ret = args.ret;
+		if (!check_func_list(nojump_funcs, &ret, frame.pc)) {
 			pr_info("PID: %d Comm: %.20s\n", t->pid, t->comm);
 			show_stack(t, NULL, KERN_INFO);
 			goto out;
 		}
+		if (other_funcs != NULL) {
+			klp_walk_stackframe(&frame, klp_check_jump_func, t, &args);
+			if (args.ret) {
+				ret = args.ret;
+				pr_info("PID: %d Comm: %.20s\n", t->pid, t->comm);
+				show_stack(t, NULL, KERN_INFO);
+				goto out;
+			}
+		}
 	}
 
 out:
+	free_list(&nojump_funcs);
+	free_list(&other_funcs);
 	return ret;
 }
 #endif
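
For completeness, the nojump/other bookkeeping above can be modelled outside
the kernel. The sketch below is a hedged user-space approximation: calloc
stands in for kzalloc, a plain range check stands in for
klp_compare_address()/klp_size_to_check(), and the addresses in main() are
made up for illustration:

#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Mirrors struct klp_func_list from the patch (kernel headers omitted). */
struct klp_func_list {
	struct klp_func_list *next;
	unsigned long func_addr;
	unsigned long func_size;
	const char *func_name;
	int force;
};

/* Simplified stand-in for add_func_to_list(): append one entry. */
static int add_func_to_list(struct klp_func_list **funcs,
			    struct klp_func_list **tail,
			    unsigned long addr, unsigned long size,
			    const char *name, int force)
{
	struct klp_func_list *node = calloc(1, sizeof(*node));

	if (!node)
		return -1;
	node->func_addr = addr;
	node->func_size = size;
	node->func_name = name;
	node->force = force;
	if (*tail)
		(*tail)->next = node;
	else
		*funcs = node;
	*tail = node;
	return 0;
}

/* Simplified check_func_list(): does pc land inside any listed range? */
static bool pc_is_safe(struct klp_func_list *funcs, unsigned long pc)
{
	for (; funcs; funcs = funcs->next)
		if (pc >= funcs->func_addr &&
		    pc < funcs->func_addr + funcs->func_size)
			return false;	/* task is running patched code */
	return true;
}

int main(void)
{
	struct klp_func_list *funcs = NULL, *tail = NULL;

	/* Hypothetical address/size pairs for two patched functions. */
	add_func_to_list(&funcs, &tail, 0xc0001000UL, 0x40, "foo", 0);
	add_func_to_list(&funcs, &tail, 0xc0002000UL, 0x80, "bar", 0);

	printf("pc in foo:    safe=%d\n", pc_is_safe(funcs, 0xc0001010UL)); /* 0 */
	printf("pc elsewhere: safe=%d\n", pc_is_safe(funcs, 0xc0003000UL)); /* 1 */
	return 0;
}

The point of the split is visible here: entries on the nojump list only ever
need this single range check against a task's saved PC, while entries on the
other list must be checked against every frame during the stack walk.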