From: Liu Shixin <liushixin2@huawei.com>
hulk inclusion
category: bugfix
bugzilla: 181005 https://gitee.com/openeuler/kernel/issues/I4DDEL
-------------------------------------------------
Currently, if KFENCE is enabled on arm64, the entire linear map is mapped at page granularity, which is overkill: only the KFENCE pool actually needs page-granular mappings. Remove the restriction from the linear-map setup and instead force the linear mapping of the KFENCE pool to page granularity later, in arch_kfence_init_pool().
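For context: KFENCE protects and unprotects individual pool pages by flipping their validity in the linear map, which only works on PTE-level mappings. A minimal sketch of the existing arm64 helper (its body is not part of this diff) shows why any block mapping covering the pool must be split:

    static inline bool kfence_protect_page(unsigned long addr, bool protect)
    {
            /* Toggle the valid bit of exactly one linear-map page. */
            set_memory_valid(addr, 1, !protect);
            return true;
    }

arch_kfence_init_pool() below walks the pool and splits any PUD/PMD block mapping into page mappings so this per-page toggling is always possible.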
Signed-off-by: Liu Shixin <liushixin2@huawei.com>
Reviewed-by: Kefeng Wang <wangkefeng.wang@huawei.com>
Signed-off-by: Chen Jun <chenjun102@huawei.com>
---
 arch/arm64/include/asm/kfence.h | 70 ++++++++++++++++++++++++++++++++-
 arch/arm64/mm/mmu.c             |  6 +--
 2 files changed, 71 insertions(+), 5 deletions(-)
diff --git a/arch/arm64/include/asm/kfence.h b/arch/arm64/include/asm/kfence.h index d061176d57ea..322e95bc228d 100644 --- a/arch/arm64/include/asm/kfence.h +++ b/arch/arm64/include/asm/kfence.h @@ -8,9 +8,77 @@ #ifndef __ASM_KFENCE_H #define __ASM_KFENCE_H
+#include <linux/kfence.h> #include <asm/cacheflush.h> +#include <asm/pgalloc.h>
-static inline bool arch_kfence_init_pool(void) { return true; } +static inline int split_pud_page(pud_t *pud, unsigned long addr) +{ + int i; + pmd_t *pmd = pmd_alloc_one(&init_mm, addr); + unsigned long pfn = PFN_DOWN(__pa(addr)); + + if (!pmd) + return -ENOMEM; + + for (i = 0; i < PTRS_PER_PMD; i++) + set_pmd(pmd + i, pmd_mkhuge(pfn_pmd(pfn + i * PTRS_PER_PTE, PAGE_KERNEL))); + + smp_wmb(); /* See comment in __pte_alloc */ + pud_populate(&init_mm, pud, pmd); + + flush_tlb_kernel_range(addr, addr + PUD_SIZE); + return 0; +} + +static inline int split_pmd_page(pmd_t *pmd, unsigned long addr) +{ + int i; + pte_t *pte = pte_alloc_one_kernel(&init_mm); + unsigned long pfn = PFN_DOWN(__pa(addr)); + + if (!pte) + return -ENOMEM; + + for (i = 0; i < PTRS_PER_PTE; i++) + set_pte(pte + i, pfn_pte(pfn + i, PAGE_KERNEL)); + + smp_wmb(); /* See comment in __pte_alloc */ + pmd_populate_kernel(&init_mm, pmd, pte); + + flush_tlb_kernel_range(addr, addr + PMD_SIZE); + return 0; +} + +static inline bool arch_kfence_init_pool(void) +{ + unsigned long addr; + pgd_t *pgd; + p4d_t *p4d; + pud_t *pud; + pmd_t *pmd; + + for (addr = (unsigned long)__kfence_pool; is_kfence_address((void *)addr); + addr += PAGE_SIZE) { + pgd = pgd_offset(&init_mm, addr); + if (pgd_leaf(*pgd)) + return false; + p4d = p4d_offset(pgd, addr); + if (p4d_leaf(*p4d)) + return false; + pud = pud_offset(p4d, addr); + if (pud_leaf(*pud)) { + if (split_pud_page(pud, addr & PUD_MASK)) + return false; + } + pmd = pmd_offset(pud, addr); + if (pmd_leaf(*pmd)) { + if (split_pmd_page(pmd, addr & PMD_MASK)) + return false; + } + } + return true; +}
static inline bool kfence_protect_page(unsigned long addr, bool protect) { diff --git a/arch/arm64/mm/mmu.c b/arch/arm64/mm/mmu.c index b6a9895d6655..1c2a965e65b3 100644 --- a/arch/arm64/mm/mmu.c +++ b/arch/arm64/mm/mmu.c @@ -492,8 +492,7 @@ static void __init map_mem(pgd_t *pgdp) int flags = 0; u64 i;
- if (rodata_full || crash_mem_map || debug_pagealloc_enabled() || - IS_ENABLED(CONFIG_KFENCE)) + if (rodata_full || crash_mem_map || debug_pagealloc_enabled()) flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
/* @@ -1458,8 +1457,7 @@ int arch_add_memory(int nid, u64 start, u64 size, * KFENCE requires linear map to be mapped at page granularity, so that * it is possible to protect/unprotect single pages in the KFENCE pool. */ - if (rodata_full || debug_pagealloc_enabled() || - IS_ENABLED(CONFIG_KFENCE)) + if (rodata_full || debug_pagealloc_enabled()) flags = NO_BLOCK_MAPPINGS | NO_CONT_MAPPINGS;
__create_pgd_mapping(swapper_pg_dir, start, __phys_to_virt(start),
From: Ye Weihua <yeweihua4@huawei.com>
hulk inclusion
category: feature
bugzilla: 119440 https://gitee.com/openeuler/kernel/issues/I4DDEL
--------------------------------
The previous code does not consider the case where preemption is enabled. With CONFIG_PREEMPT enabled, a function can be interrupted even if its entry contains no jump instruction, and if preemption is triggered on return from that interrupt, the function ends up in the middle of the thread's call stack.
The stack depth optimization therefore needs to be adjusted as follows:
1. For functions with a jump instruction at the entry, always check the
   entire thread stack.
2. For functions without a jump instruction at the entry:
   a. If CONFIG_PREEMPT is disabled, the replaced code cannot appear on
      any thread stack, so no check is required.
   b. If CONFIG_PREEMPT is enabled, check the entire thread stack.
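In code form, the per-function decision reduces to the condition introduced by the hunks below (this is only an editorial sketch; need_stack_check() is not a helper added by this patch):

    /*
     * Skip the stack check only when preemption is off, the function is
     * not forced (KLP_NORMAL_FORCE), and the replaced area contains no
     * jump instruction.
     */
    static bool need_stack_check(struct klp_func *func, unsigned long func_addr)
    {
            return IS_ENABLED(CONFIG_PREEMPTION) ||
                   func->force == KLP_NORMAL_FORCE ||
                   check_jump_insn(func_addr);
    }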
Signed-off-by: Ye Weihua <yeweihua4@huawei.com>
Reviewed-by: Kuohai Xu <xukuohai@huawei.com>
Signed-off-by: Chen Jun <chenjun102@huawei.com>
---
 arch/arm/kernel/livepatch.c        | 66 +++++++++++++++-------------
 arch/arm64/kernel/livepatch.c      | 66 +++++++++++++++-------------
 arch/powerpc/kernel/livepatch_32.c | 66 +++++++++++++++-------------
 arch/powerpc/kernel/livepatch_64.c | 70 ++++++++++++++++--------------
 arch/x86/kernel/livepatch.c        | 69 ++++++++++++++---------------
 5 files changed, 180 insertions(+), 157 deletions(-)
diff --git a/arch/arm/kernel/livepatch.c b/arch/arm/kernel/livepatch.c index 1ec326706a7b..e98c8e47a344 100644 --- a/arch/arm/kernel/livepatch.c +++ b/arch/arm/kernel/livepatch.c @@ -104,7 +104,7 @@ struct klp_func_list {
struct walk_stackframe_args { int enable; - struct klp_func_list *other_funcs; + struct klp_func_list *check_funcs; int ret; };
@@ -167,16 +167,14 @@ static int add_func_to_list(struct klp_func_list **funcs, struct klp_func_list * }
static int klp_check_activeness_func(struct klp_patch *patch, int enable, - struct klp_func_list **nojump_funcs, - struct klp_func_list **other_funcs) + struct klp_func_list **check_funcs) { int ret; struct klp_object *obj; struct klp_func_node *func_node; struct klp_func *func; unsigned long func_addr, func_size; - struct klp_func_list *pnjump = NULL; - struct klp_func_list *pother = NULL; + struct klp_func_list *pcheck = NULL;
for (obj = patch->objs; obj->funcs; obj++) { for (func = obj->funcs; func->old_name; func++) { @@ -209,17 +207,23 @@ static int klp_check_activeness_func(struct klp_patch *patch, int enable, func_addr = (unsigned long)prev->new_func; func_size = prev->new_size; } - if ((func->force == KLP_STACK_OPTIMIZE) && - !check_jump_insn(func_addr)) - ret = add_func_to_list(nojump_funcs, &pnjump, - func_addr, func_size, - func->old_name, func->force); - else - ret = add_func_to_list(other_funcs, &pother, + /* + * When preemption is disabled and the + * replacement area does not contain a jump + * instruction, the migration thread is + * scheduled to run stop machine only after the + * excution of intructions to be replaced is + * complete. + */ + if (IS_ENABLED(CONFIG_PREEMPTION) || + (func->force == KLP_NORMAL_FORCE) || + check_jump_insn(func_addr)) { + ret = add_func_to_list(check_funcs, &pcheck, func_addr, func_size, func->old_name, func->force); - if (ret) - return ret; + if (ret) + return ret; + } } else { /* * When disable, check for the previously @@ -229,6 +233,14 @@ static int klp_check_activeness_func(struct klp_patch *patch, int enable, func_node = klp_find_func_node(func->old_func); if (!func_node) return -EINVAL; +#ifdef CONFIG_PREEMPTION + /* + * No scheduling point in the replacement + * instructions. Therefore, when preemption is + * not enabled, atomic execution is performed + * and these instructions will not appear on + * the stack. + */ if (list_is_singular(&func_node->func_stack)) { func_addr = (unsigned long)func->old_func; func_size = func->old_size; @@ -241,14 +253,15 @@ static int klp_check_activeness_func(struct klp_patch *patch, int enable, func_addr = (unsigned long)prev->new_func; func_size = prev->new_size; } - ret = add_func_to_list(other_funcs, &pother, + ret = add_func_to_list(check_funcs, &pcheck, func_addr, func_size, func->old_name, 0); if (ret) return ret; +#endif func_addr = (unsigned long)func->new_func; func_size = func->new_size; - ret = add_func_to_list(other_funcs, &pother, + ret = add_func_to_list(check_funcs, &pcheck, func_addr, func_size, func->old_name, 0); if (ret) @@ -275,9 +288,9 @@ static bool check_func_list(struct klp_func_list *funcs, int *ret, unsigned long static int klp_check_jump_func(struct stackframe *frame, void *data) { struct walk_stackframe_args *args = data; - struct klp_func_list *other_funcs = args->other_funcs; + struct klp_func_list *check_funcs = args->check_funcs;
- return check_func_list(other_funcs, &args->ret, frame->pc); + return check_func_list(check_funcs, &args->ret, frame->pc); }
static void free_list(struct klp_func_list **funcs) @@ -296,16 +309,15 @@ int klp_check_calltrace(struct klp_patch *patch, int enable) struct task_struct *g, *t; struct stackframe frame; int ret = 0; - struct klp_func_list *nojump_funcs = NULL; - struct klp_func_list *other_funcs = NULL; + struct klp_func_list *check_funcs = NULL; struct walk_stackframe_args args = { .ret = 0 };
- ret = klp_check_activeness_func(patch, enable, &nojump_funcs, &other_funcs); + ret = klp_check_activeness_func(patch, enable, &check_funcs); if (ret) goto out; - args.other_funcs = other_funcs; + args.check_funcs = check_funcs;
for_each_process_thread(g, t) { if (t == current) { @@ -330,12 +342,7 @@ int klp_check_calltrace(struct klp_patch *patch, int enable) frame.lr = 0; /* recovered from the stack */ frame.pc = thread_saved_pc(t); } - if (!check_func_list(nojump_funcs, &ret, frame.pc)) { - pr_info("PID: %d Comm: %.20s\n", t->pid, t->comm); - show_stack(t, NULL, KERN_INFO); - goto out; - } - if (other_funcs != NULL) { + if (check_funcs != NULL) { walk_stackframe(&frame, klp_check_jump_func, &args); if (args.ret) { ret = args.ret; @@ -347,8 +354,7 @@ int klp_check_calltrace(struct klp_patch *patch, int enable) }
out: - free_list(&nojump_funcs); - free_list(&other_funcs); + free_list(&check_funcs); return ret; } #endif diff --git a/arch/arm64/kernel/livepatch.c b/arch/arm64/kernel/livepatch.c index 2ffbdfbe87de..e60841e975c6 100644 --- a/arch/arm64/kernel/livepatch.c +++ b/arch/arm64/kernel/livepatch.c @@ -99,7 +99,7 @@ struct klp_func_list {
struct walk_stackframe_args { int enable; - struct klp_func_list *other_funcs; + struct klp_func_list *check_funcs; int ret; };
@@ -162,16 +162,14 @@ static int add_func_to_list(struct klp_func_list **funcs, struct klp_func_list * }
static int klp_check_activeness_func(struct klp_patch *patch, int enable, - struct klp_func_list **nojump_funcs, - struct klp_func_list **other_funcs) + struct klp_func_list **check_funcs) { int ret; struct klp_object *obj; struct klp_func *func; unsigned long func_addr, func_size; struct klp_func_node *func_node; - struct klp_func_list *pnjump = NULL; - struct klp_func_list *pother = NULL; + struct klp_func_list *pcheck = NULL;
for (obj = patch->objs; obj->funcs; obj++) { for (func = obj->funcs; func->old_name; func++) { @@ -200,17 +198,23 @@ static int klp_check_activeness_func(struct klp_patch *patch, int enable, func_addr = (unsigned long)prev->new_func; func_size = prev->new_size; } - if ((func->force == KLP_STACK_OPTIMIZE) && - !check_jump_insn(func_addr)) - ret = add_func_to_list(nojump_funcs, &pnjump, - func_addr, func_size, - func->old_name, func->force); - else - ret = add_func_to_list(other_funcs, &pother, + /* + * When preemption is disabled and the + * replacement area does not contain a jump + * instruction, the migration thread is + * scheduled to run stop machine only after the + * excution of instructions to be replaced is + * complete. + */ + if (IS_ENABLED(CONFIG_PREEMPTION) || + (func->force == KLP_NORMAL_FORCE) || + check_jump_insn(func_addr)) { + ret = add_func_to_list(check_funcs, &pcheck, func_addr, func_size, func->old_name, func->force); - if (ret) - return ret; + if (ret) + return ret; + } } else { /* * When disable, check for the previously @@ -221,6 +225,14 @@ static int klp_check_activeness_func(struct klp_patch *patch, int enable, if (!func_node) { return -EINVAL; } +#ifdef CONFIG_PREEMPTION + /* + * No scheduling point in the replacement + * instructions. Therefore, when preemption is + * not enabled, atomic execution is performed + * and these instructions will not appear on + * the stack. + */ if (list_is_singular(&func_node->func_stack)) { func_addr = (unsigned long)func->old_func; func_size = func->old_size; @@ -233,15 +245,16 @@ static int klp_check_activeness_func(struct klp_patch *patch, int enable, func_addr = (unsigned long)prev->new_func; func_size = prev->new_size; } - ret = add_func_to_list(other_funcs, &pother, + ret = add_func_to_list(check_funcs, &pcheck, func_addr, func_size, func->old_name, 0); if (ret) return ret; +#endif
func_addr = (unsigned long)func->new_func; func_size = func->new_size; - ret = add_func_to_list(other_funcs, &pother, + ret = add_func_to_list(check_funcs, &pcheck, func_addr, func_size, func->old_name, 0); if (ret) @@ -268,9 +281,9 @@ static bool check_func_list(struct klp_func_list *funcs, int *ret, unsigned long static bool klp_check_jump_func(void *data, unsigned long pc) { struct walk_stackframe_args *args = data; - struct klp_func_list *other_funcs = args->other_funcs; + struct klp_func_list *check_funcs = args->check_funcs;
- return check_func_list(other_funcs, &args->ret, pc); + return check_func_list(check_funcs, &args->ret, pc); }
static void free_list(struct klp_func_list **funcs) @@ -289,17 +302,16 @@ int klp_check_calltrace(struct klp_patch *patch, int enable) struct task_struct *g, *t; struct stackframe frame; int ret = 0; - struct klp_func_list *nojump_funcs = NULL; - struct klp_func_list *other_funcs = NULL; + struct klp_func_list *check_funcs = NULL; struct walk_stackframe_args args = { .enable = enable, .ret = 0 };
- ret = klp_check_activeness_func(patch, enable, &nojump_funcs, &other_funcs); + ret = klp_check_activeness_func(patch, enable, &check_funcs); if (ret) goto out; - args.other_funcs = other_funcs; + args.check_funcs = check_funcs;
for_each_process_thread(g, t) { /* @@ -328,12 +340,7 @@ int klp_check_calltrace(struct klp_patch *patch, int enable) frame.fp = thread_saved_fp(t); frame.pc = thread_saved_pc(t); } - if (!check_func_list(nojump_funcs, &ret, frame.pc)) { - pr_info("PID: %d Comm: %.20s\n", t->pid, t->comm); - show_stack(t, NULL, KERN_INFO); - goto out; - } - if (other_funcs != NULL) { + if (check_funcs != NULL) { start_backtrace(&frame, frame.fp, frame.pc); walk_stackframe(t, &frame, klp_check_jump_func, &args); if (args.ret) { @@ -346,8 +353,7 @@ int klp_check_calltrace(struct klp_patch *patch, int enable) }
out: - free_list(&nojump_funcs); - free_list(&other_funcs); + free_list(&check_funcs); return ret; } #endif diff --git a/arch/powerpc/kernel/livepatch_32.c b/arch/powerpc/kernel/livepatch_32.c index ea153f52e9ad..13f3200bf52f 100644 --- a/arch/powerpc/kernel/livepatch_32.c +++ b/arch/powerpc/kernel/livepatch_32.c @@ -91,7 +91,7 @@ struct stackframe {
struct walk_stackframe_args { int enable; - struct klp_func_list *other_funcs; + struct klp_func_list *check_funcs; int ret; };
@@ -154,16 +154,14 @@ static int add_func_to_list(struct klp_func_list **funcs, struct klp_func_list * }
static int klp_check_activeness_func(struct klp_patch *patch, int enable, - struct klp_func_list **nojump_funcs, - struct klp_func_list **other_funcs) + struct klp_func_list **check_funcs) { int ret; struct klp_object *obj; struct klp_func *func; unsigned long func_addr, func_size; struct klp_func_node *func_node; - struct klp_func_list *pnjump = NULL; - struct klp_func_list *pother = NULL; + struct klp_func_list *pcheck = NULL;
for (obj = patch->objs; obj->funcs; obj++) { for (func = obj->funcs; func->old_name; func++) { @@ -196,17 +194,23 @@ static int klp_check_activeness_func(struct klp_patch *patch, int enable, func_addr = (unsigned long)prev->new_func; func_size = prev->new_size; } - if ((func->force == KLP_STACK_OPTIMIZE) && - !check_jump_insn(func_addr)) - ret = add_func_to_list(nojump_funcs, &pnjump, - func_addr, func_size, - func->old_name, func->force); - else - ret = add_func_to_list(other_funcs, &pother, + /* + * When preemtion is disabled and the + * replacement area does not contain a jump + * instruction, the migration thread is + * scheduled to run stop machine only after the + * excution of instructions to be replaced is + * complete. + */ + if (IS_ENABLED(CONFIG_PREEMPTION) || + (func->force == KLP_NORMAL_FORCE) || + check_jump_insn(func_addr)) { + ret = add_func_to_list(check_funcs, &pcheck, func_addr, func_size, func->old_name, func->force); - if (ret) - return ret; + if (ret) + return ret; + } } else { /* * When disable, check for the previously @@ -216,6 +220,14 @@ static int klp_check_activeness_func(struct klp_patch *patch, int enable, func_node = klp_find_func_node(func->old_func); if (!func_node) return -EINVAL; +#ifdef CONFIG_PREEMPTION + /* + * No scheduling point in the replacement + * instructions. Therefore, when preemption is + * not enabled, atomic execution is performed + * and these instructions will not appear on + * the stack. + */ if (list_is_singular(&func_node->func_stack)) { func_addr = (unsigned long)func->old_func; func_size = func->old_size; @@ -228,13 +240,14 @@ static int klp_check_activeness_func(struct klp_patch *patch, int enable, func_addr = (unsigned long)prev->new_func; func_size = prev->new_size; } - ret = add_func_to_list(other_funcs, &pother, func_addr, + ret = add_func_to_list(check_funcs, &pcheck, func_addr, func_size, func->old_name, 0); if (ret) return ret; +#endif func_addr = (unsigned long)func->new_func; func_size = func->new_size; - ret = add_func_to_list(other_funcs, &pother, func_addr, + ret = add_func_to_list(check_funcs, &pcheck, func_addr, func_size, func->old_name, 0); if (ret) return ret; @@ -289,9 +302,9 @@ static bool check_func_list(struct klp_func_list *funcs, int *ret, unsigned long static int klp_check_jump_func(struct stackframe *frame, void *data) { struct walk_stackframe_args *args = data; - struct klp_func_list *other_funcs = args->other_funcs; + struct klp_func_list *check_funcs = args->check_funcs;
- if (!check_func_list(other_funcs, &args->ret, frame->pc)) { + if (!check_func_list(check_funcs, &args->ret, frame->pc)) { return args->ret; } return 0; @@ -314,16 +327,15 @@ int klp_check_calltrace(struct klp_patch *patch, int enable) struct stackframe frame; unsigned long *stack; int ret = 0; - struct klp_func_list *nojump_funcs = NULL; - struct klp_func_list *other_funcs = NULL; + struct klp_func_list *check_funcs = NULL; struct walk_stackframe_args args = { .ret = 0 };
- ret = klp_check_activeness_func(patch, enable, &nojump_funcs, &other_funcs); + ret = klp_check_activeness_func(patch, enable, &check_funcs); if (ret) goto out; - args.other_funcs = other_funcs; + args.check_funcs = check_funcs;
for_each_process_thread(g, t) { if (t == current) { @@ -362,12 +374,7 @@ int klp_check_calltrace(struct klp_patch *patch, int enable)
frame.sp = (unsigned long)stack; frame.pc = stack[STACK_FRAME_LR_SAVE]; - if (!check_func_list(nojump_funcs, &ret, frame.pc)) { - pr_info("PID: %d Comm: %.20s\n", t->pid, t->comm); - show_stack(t, NULL, KERN_INFO); - goto out; - } - if (other_funcs != NULL) { + if (check_funcs != NULL) { klp_walk_stackframe(&frame, klp_check_jump_func, t, &args); if (args.ret) { ret = args.ret; @@ -379,8 +386,7 @@ int klp_check_calltrace(struct klp_patch *patch, int enable) }
out: - free_list(&nojump_funcs); - free_list(&other_funcs); + free_list(&check_funcs); return ret; } #endif diff --git a/arch/powerpc/kernel/livepatch_64.c b/arch/powerpc/kernel/livepatch_64.c index 09e8bb330606..77fbb5603137 100644 --- a/arch/powerpc/kernel/livepatch_64.c +++ b/arch/powerpc/kernel/livepatch_64.c @@ -101,7 +101,7 @@ struct stackframe {
struct walk_stackframe_args { int enable; - struct klp_func_list *other_funcs; + struct klp_func_list *check_funcs; int ret; };
@@ -164,16 +164,14 @@ static int add_func_to_list(struct klp_func_list **funcs, struct klp_func_list * }
static int klp_check_activeness_func(struct klp_patch *patch, int enable, - struct klp_func_list **nojump_funcs, - struct klp_func_list **other_funcs) + struct klp_func_list **check_funcs) { int ret; struct klp_object *obj; struct klp_func *func; unsigned long func_addr, func_size; struct klp_func_node *func_node = NULL; - struct klp_func_list *pnjump = NULL; - struct klp_func_list *pother = NULL; + struct klp_func_list *pcheck = NULL;
for (obj = patch->objs; obj->funcs; obj++) { for (func = obj->funcs; func->old_name; func++) { @@ -209,17 +207,23 @@ static int klp_check_activeness_func(struct klp_patch *patch, int enable, (void *)prev->new_func); func_size = prev->new_size; } - if ((func->force == KLP_STACK_OPTIMIZE) && - !check_jump_insn(func_addr)) - ret = add_func_to_list(nojump_funcs, &pnjump, - func_addr, func_size, - func->old_name, func->force); - else - ret = add_func_to_list(other_funcs, &pother, + /* + * When preemption is disabled and the + * replacement area does not contain a jump + * instruction, the migration thread is + * scheduled to run stop machine only after the + * excution of instructions to be repalced is + * complete. + */ + if (IS_ENABLED(CONFIG_PREEMTION) || + (func->force == KLP_NORMAL_FORCE) || + check_jump_insn(func_addr)) { + ret = add_func_to_list(check_funcs, &pcheck, func_addr, func_size, func->old_name, func->force); - if (ret) - return ret; + if (ret) + return ret; + } } else { /* * When disable, check for the function itself @@ -228,7 +232,7 @@ static int klp_check_activeness_func(struct klp_patch *patch, int enable, func_addr = ppc_function_entry( (void *)func->new_func); func_size = func->new_size; - ret = add_func_to_list(other_funcs, &pother, func_addr, + ret = add_func_to_list(check_funcs, &pcheck, func_addr, func_size, func->old_name, 0); if (ret) return ret; @@ -244,12 +248,21 @@ static int klp_check_activeness_func(struct klp_patch *patch, int enable, * so, we should check all the func in the callchain */ if (func_addr != (unsigned long)func->old_func) { +#ifdef CONFIG_PREEMPTION + /* + * No scheduling point in the replacement + * instructions. Therefore, when preemption is + * not enabled, atomic execution is performed + * and these instructions will not appear on + * the stack. + */ func_addr = (unsigned long)func->old_func; func_size = func->old_size; - ret = add_func_to_list(other_funcs, &pother, func_addr, + ret = add_func_to_list(check_funcs, &pcheck, func_addr, func_size, "OLD_FUNC", 0); if (ret) return ret; +#endif
if (func_node == NULL || func_node->trampoline.magic != BRANCH_TRAMPOLINE_MAGIC) @@ -257,7 +270,7 @@ static int klp_check_activeness_func(struct klp_patch *patch, int enable,
func_addr = (unsigned long)&func_node->trampoline; func_size = sizeof(struct ppc64_klp_btramp_entry); - ret = add_func_to_list(other_funcs, &pother, func_addr, + ret = add_func_to_list(check_funcs, &pcheck, func_addr, func_size, "trampoline", 0); if (ret) return ret; @@ -343,9 +356,9 @@ static bool check_func_list(struct klp_func_list *funcs, int *ret, unsigned long static int klp_check_jump_func(struct stackframe *frame, void *data) { struct walk_stackframe_args *args = data; - struct klp_func_list *other_funcs = args->other_funcs; + struct klp_func_list *check_funcs = args->check_funcs;
- if (!check_func_list(other_funcs, &args->ret, frame->pc)) { + if (!check_func_list(check_funcs, &args->ret, frame->pc)) { return args->ret; } return 0; @@ -368,14 +381,13 @@ int klp_check_calltrace(struct klp_patch *patch, int enable) struct stackframe frame; unsigned long *stack; int ret = 0; - struct klp_func_list *nojump_funcs = NULL; - struct klp_func_list *other_funcs = NULL; + struct klp_func_list *check_funcs = NULL; struct walk_stackframe_args args;
- ret = klp_check_activeness_func(patch, enable, &nojump_funcs, &other_funcs); + ret = klp_check_activeness_func(patch, enable, &check_funcs); if (ret) goto out; - args.other_funcs = other_funcs; + args.check_funcs = check_funcs; args.ret = 0;
for_each_process_thread(g, t) { @@ -418,14 +430,7 @@ int klp_check_calltrace(struct klp_patch *patch, int enable) frame.sp = (unsigned long)stack; frame.pc = stack[STACK_FRAME_LR_SAVE]; frame.nip = 0; - if (!check_func_list(nojump_funcs, &ret, frame.pc)) { - pr_debug("%s FAILED when %s\n", __func__, - enable ? "enabling" : "disabling"); - pr_info("PID: %d Comm: %.20s\n", t->pid, t->comm); - show_stack(t, NULL, KERN_INFO); - goto out; - } - if (other_funcs != NULL) { + if (check_funcs != NULL) { klp_walk_stackframe(&frame, klp_check_jump_func, t, &args); if (args.ret) { ret = args.ret; @@ -439,8 +444,7 @@ int klp_check_calltrace(struct klp_patch *patch, int enable) }
out: - free_list(&nojump_funcs); - free_list(&other_funcs); + free_list(&check_funcs); return ret; } #endif diff --git a/arch/x86/kernel/livepatch.c b/arch/x86/kernel/livepatch.c index bca152b67818..386d224d5890 100644 --- a/arch/x86/kernel/livepatch.c +++ b/arch/x86/kernel/livepatch.c @@ -148,17 +148,14 @@ static int add_func_to_list(struct klp_func_list **funcs, struct klp_func_list * }
static int klp_check_activeness_func(struct klp_patch *patch, int enable, - struct klp_func_list **nojump_funcs, - struct klp_func_list **other_funcs) + struct klp_func_list **check_funcs) { int ret; struct klp_object *obj; struct klp_func *func; unsigned long func_addr, func_size; struct klp_func_node *func_node = NULL; - struct klp_func_list *pnojump = NULL; - struct klp_func_list *pother = NULL; - + struct klp_func_list *pcheck = NULL;
for (obj = patch->objs; obj->funcs; obj++) { for (func = obj->funcs; func->old_name; func++) { @@ -189,17 +186,23 @@ static int klp_check_activeness_func(struct klp_patch *patch, int enable, func_addr = (unsigned long)prev->new_func; func_size = prev->new_size; } - if ((func->force == KLP_STACK_OPTIMIZE) && - !check_jump_insn(func_addr)) - ret = add_func_to_list(nojump_funcs, &pnojump, - func_addr, func_size, - func->old_name, func->force); - else - ret = add_func_to_list(other_funcs, &pother, + /* + * When preemtion is disabled and the + * replacement area does not contain a jump + * instruction, the migration thread is + * scheduled to run stop machine only after the + * excution of instructions to be replaced is + * complete. + */ + if (IS_ENABLED(CONFIG_PREEMPTION) || + (func->force == KLP_NORMAL_FORCE) || + check_jump_insn(func_addr)) { + ret = add_func_to_list(check_funcs, &pcheck, func_addr, func_size, func->old_name, func->force); - if (ret) - return ret; + if (ret) + return ret; + } } else { /* * When disable, check for the function @@ -207,6 +210,14 @@ static int klp_check_activeness_func(struct klp_patch *patch, int enable, */ if (!func_node) return -EINVAL; +#ifdef CONFIG_PREEMPTION + /* + * No scheduling point in the replacement + * instructions. Therefore, when preemption is + * not enabled, atomic execution is performed + * and these instructions will not appear on + * the stack. + */ if (list_is_singular(&func_node->func_stack)) { func_addr = (unsigned long)func->old_func; func_size = func->old_size; @@ -219,14 +230,15 @@ static int klp_check_activeness_func(struct klp_patch *patch, int enable, func_addr = (unsigned long)prev->new_func; func_size = prev->new_size; } - ret = add_func_to_list(other_funcs, &pother, func_addr, + ret = add_func_to_list(check_funcs, &pcheck, func_addr, func_size, func->old_name, 0); if (ret) return ret; +#endif
func_addr = (unsigned long)func->new_func; func_size = func->new_size; - ret = add_func_to_list(other_funcs, &pother, func_addr, + ret = add_func_to_list(check_funcs, &pcheck, func_addr, func_size, func->old_name, 0); if (ret) return ret; @@ -281,7 +293,7 @@ static bool check_func_list(struct klp_func_list *funcs, int *ret, unsigned long }
static int klp_check_stack(void *trace_ptr, int trace_len, - struct klp_func_list *other_funcs) + struct klp_func_list *check_funcs) { #ifdef CONFIG_ARCH_STACKWALK unsigned long *trace = trace_ptr; @@ -298,7 +310,7 @@ static int klp_check_stack(void *trace_ptr, int trace_len, for (i = 0; i < trace->nr_entries; i++) { address = trace->entries[i]; #endif - if (!check_func_list(other_funcs, &ret, address)) { + if (!check_func_list(check_funcs, &ret, address)) { #ifdef CONFIG_ARCH_STACKWALK klp_print_stack_trace(trace_ptr, trace_len); #else @@ -326,8 +338,7 @@ int klp_check_calltrace(struct klp_patch *patch, int enable) { struct task_struct *g, *t; int ret = 0; - struct klp_func_list *nojump_funcs = NULL; - struct klp_func_list *other_funcs = NULL; + struct klp_func_list *check_funcs = NULL; static unsigned long trace_entries[MAX_STACK_ENTRIES]; #ifdef CONFIG_ARCH_STACKWALK int trace_len; @@ -335,7 +346,7 @@ int klp_check_calltrace(struct klp_patch *patch, int enable) struct stack_trace trace; #endif
- ret = klp_check_activeness_func(patch, enable, &nojump_funcs, &other_funcs); + ret = klp_check_activeness_func(patch, enable, &check_funcs); if (ret) goto out; for_each_process_thread(g, t) { @@ -362,26 +373,16 @@ int klp_check_calltrace(struct klp_patch *patch, int enable) goto out; } #ifdef CONFIG_ARCH_STACKWALK - if (!check_func_list(nojump_funcs, &ret, trace_entries[0])) { - klp_print_stack_trace(&trace_entries, trace_len); -#else - if (!check_func_list(nojump_funcs, &ret, trace->entries[0])) { - klp_print_stack_trace(&trace, 0); -#endif - goto out; - } -#ifdef CONFIG_ARCH_STACKWALK - ret = klp_check_stack(trace_entries, trace_len, other_funcs); + ret = klp_check_stack(trace_entries, trace_len, check_funcs); #else - ret = klp_check_stack(&trace, 0, other_funcs); + ret = klp_check_stack(&trace, 0, check_funcs); #endif if (ret) goto out; }
out: - free_list(&nojump_funcs); - free_list(&other_funcs); + free_list(&check_funcs); return ret; } #endif
From: Robin Murphy <robin.murphy@arm.com>
mainline inclusion
from mainline-5.14-rc2
commit 295cf156231ca3f9e3a66bde7fab5e09c41835e0
category: bugfix
bugzilla: 55085 https://gitee.com/openeuler/kernel/issues/I4DDEL
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
---------------------------
Al reminds us that the usercopy API must only return complete failure if absolutely nothing could be copied. Currently, if userspace does something silly like giving us an unaligned pointer to Device memory, or a size which overruns MTE tag bounds, we may fail to honour that requirement when faulting on a multi-byte access even though a smaller access could have succeeded.
Add a mitigation to the fixup routines to fall back to a single-byte copy if we faulted on a larger access before anything has been written to the destination, to guarantee making *some* forward progress. We needn't be too concerned about the overall performance since this should only occur when callers are doing something a bit dodgy in the first place. Particularly broken userspace might still be able to trick generic_perform_write() into an infinite loop by targeting write() at an mmap() of some read-only device register where the fault-in load succeeds but any store synchronously aborts such that copy_to_user() is genuinely unable to make progress, but, well, don't do that...
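A rough C-level sketch of the contract the fixup restores (editorial illustration only; the real change lives in the assembly fixup paths below, and try_copy here is a stand-in for the raw copy routine, returning the number of bytes it could not copy):

    #include <stddef.h>

    static size_t copy_with_fallback(char *dst, const char *src, size_t len,
                                     size_t (*try_copy)(char *, const char *, size_t))
    {
            size_t not_copied = try_copy(dst, src, len);

            /* Faulted before writing anything: retry a single byte so the
             * caller sees *some* forward progress rather than total failure. */
            if (not_copied == len && len > 1 && try_copy(dst, src, 1) == 0)
                    return len - 1;
            return not_copied;
    }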
CC: stable@vger.kernel.org
Reported-by: Chen Huang <chenhuang5@huawei.com>
Suggested-by: Al Viro <viro@zeniv.linux.org.uk>
Reviewed-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Robin Murphy <robin.murphy@arm.com>
Link: https://lore.kernel.org/r/dc03d5c675731a1f24a62417dba5429ad744234e.162609843...
Signed-off-by: Will Deacon <will@kernel.org>
Conflicts:
	arch/arm64/lib/copy_from_user.S
	arch/arm64/lib/copy_in_user.S
	arch/arm64/lib/copy_to_user.S
Signed-off-by: Chen Huang <chenhuang5@huawei.com>
Reviewed-by: Chen Wandun <chenwandun@huawei.com>
Signed-off-by: Chen Jun <chenjun102@huawei.com>
---
 arch/arm64/lib/copy_from_user.S | 13 ++++++++++---
 arch/arm64/lib/copy_in_user.S   | 21 ++++++++++++++-------
 arch/arm64/lib/copy_to_user.S   | 14 +++++++++++---
 3 files changed, 35 insertions(+), 13 deletions(-)
diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S index 0f8a3a9e3795..957a6d092d7a 100644 --- a/arch/arm64/lib/copy_from_user.S +++ b/arch/arm64/lib/copy_from_user.S @@ -29,7 +29,7 @@ .endm
.macro ldrh1 reg, ptr, val - uao_user_alternative 9998f, ldrh, ldtrh, \reg, \ptr, \val + uao_user_alternative 9997f, ldrh, ldtrh, \reg, \ptr, \val .endm
.macro strh1 reg, ptr, val @@ -37,7 +37,7 @@ .endm
.macro ldr1 reg, ptr, val - uao_user_alternative 9998f, ldr, ldtr, \reg, \ptr, \val + uao_user_alternative 9997f, ldr, ldtr, \reg, \ptr, \val .endm
.macro str1 reg, ptr, val @@ -45,7 +45,7 @@ .endm
.macro ldp1 reg1, reg2, ptr, val - uao_ldp 9998f, \reg1, \reg2, \ptr, \val + uao_ldp 9997f, \reg1, \reg2, \ptr, \val .endm
.macro stp1 reg1, reg2, ptr, val @@ -53,8 +53,10 @@ .endm
end .req x5 +srcin .req x15 SYM_FUNC_START(__arch_copy_from_user) add end, x0, x2 + mov srcin, x1 #include "copy_template.S" mov x0, #0 // Nothing to copy ret @@ -63,6 +65,11 @@ EXPORT_SYMBOL(__arch_copy_from_user)
.section .fixup,"ax" .align 2 +9997: cmp dst, dstin + b.ne 9998f + // Before being absolutely sure we couldn't copy anything, try harder +USER(9998f, ldtrb tmp1w, [srcin]) + strb tmp1w, [dst], #1 9998: sub x0, end, dst // bytes not copied ret .previous diff --git a/arch/arm64/lib/copy_in_user.S b/arch/arm64/lib/copy_in_user.S index 80e37ada0ee1..35c01da09323 100644 --- a/arch/arm64/lib/copy_in_user.S +++ b/arch/arm64/lib/copy_in_user.S @@ -30,33 +30,34 @@ .endm
.macro ldrh1 reg, ptr, val - uao_user_alternative 9998f, ldrh, ldtrh, \reg, \ptr, \val + uao_user_alternative 9997f, ldrh, ldtrh, \reg, \ptr, \val .endm
.macro strh1 reg, ptr, val - uao_user_alternative 9998f, strh, sttrh, \reg, \ptr, \val + uao_user_alternative 9997f, strh, sttrh, \reg, \ptr, \val .endm
.macro ldr1 reg, ptr, val - uao_user_alternative 9998f, ldr, ldtr, \reg, \ptr, \val + uao_user_alternative 9997f, ldr, ldtr, \reg, \ptr, \val .endm
.macro str1 reg, ptr, val - uao_user_alternative 9998f, str, sttr, \reg, \ptr, \val + uao_user_alternative 9997f, str, sttr, \reg, \ptr, \val .endm
.macro ldp1 reg1, reg2, ptr, val - uao_ldp 9998f, \reg1, \reg2, \ptr, \val + uao_ldp 9997f, \reg1, \reg2, \ptr, \val .endm
.macro stp1 reg1, reg2, ptr, val - uao_stp 9998f, \reg1, \reg2, \ptr, \val + uao_stp 9997f, \reg1, \reg2, \ptr, \val .endm
end .req x5 - +srcin .req x15 SYM_FUNC_START(__arch_copy_in_user) add end, x0, x2 + mov srcin, x1 #include "copy_template.S" mov x0, #0 ret @@ -65,6 +66,12 @@ EXPORT_SYMBOL(__arch_copy_in_user)
.section .fixup,"ax" .align 2 +9997: cmp dst, dstin + b.ne 9998f + // Before being absolutely sure we couldn't copy anything, try harder +USER(9998f, ldtrb tmp1w, [srcin]) +USER(9998f, sttrb tmp1w, [dst]) + add dst, dst, #1 9998: sub x0, end, dst // bytes not copied ret .previous diff --git a/arch/arm64/lib/copy_to_user.S b/arch/arm64/lib/copy_to_user.S index 4ec59704b8f2..85705350ff35 100644 --- a/arch/arm64/lib/copy_to_user.S +++ b/arch/arm64/lib/copy_to_user.S @@ -32,7 +32,7 @@ .endm
.macro strh1 reg, ptr, val - uao_user_alternative 9998f, strh, sttrh, \reg, \ptr, \val + uao_user_alternative 9997f, strh, sttrh, \reg, \ptr, \val .endm
.macro ldr1 reg, ptr, val @@ -40,7 +40,7 @@ .endm
.macro str1 reg, ptr, val - uao_user_alternative 9998f, str, sttr, \reg, \ptr, \val + uao_user_alternative 9997f, str, sttr, \reg, \ptr, \val .endm
.macro ldp1 reg1, reg2, ptr, val @@ -48,12 +48,14 @@ .endm
.macro stp1 reg1, reg2, ptr, val - uao_stp 9998f, \reg1, \reg2, \ptr, \val + uao_stp 9997f, \reg1, \reg2, \ptr, \val .endm
end .req x5 +srcin .req x15 SYM_FUNC_START(__arch_copy_to_user) add end, x0, x2 + mov srcin, x1 #include "copy_template.S" mov x0, #0 ret @@ -62,6 +64,12 @@ EXPORT_SYMBOL(__arch_copy_to_user)
.section .fixup,"ax" .align 2 +9997: cmp dst, dstin + b.ne 9998f + // Before being absolutely sure we couldn't copy anything, try harder + ldrb tmp1w, [srcin] +USER(9998f, sttrb tmp1w, [dst]) + add dst, dst, #1 9998: sub x0, end, dst // bytes not copied ret .previous
From: Dietmar Eggemann <dietmar.eggemann@arm.com>
mainline inclusion
from mainline-v5.12-rc1
commit 71e5f6644fb2f3304fcb310145ded234a37e7cc1
category: bugfix
bugzilla: 182847 https://gitee.com/openeuler/kernel/issues/I4DDEL
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
----------------------------------------------------------
Commit 620a6dc40754 ("sched/topology: Make sched_init_numa() use a set for the deduplicating sort") allocates only 'i + nr_levels' sched_domain_topology_level entries instead of 'i + nr_levels + 1'.
This led to an Oops (on Arm64 juno with CONFIG_SCHED_DEBUG):
  sched_init_domains
    build_sched_domains()
      __free_domain_allocs()
        __sdt_free() {
          ...
          for_each_sd_topology(tl)
            ...
            sd = *per_cpu_ptr(sdd->sd, j); <--
          ...
        }
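The missing entry is the zero-filled sentinel that terminates the topology array: walkers such as for_each_sd_topology() stop at the first entry whose mask is NULL, which is also how the patch itself counts the default levels. A sketch of that convention (names taken from the hunk below):

    /* Count the default topology levels, stopping at the NULL-mask sentinel. */
    for (i = 0; sched_domain_topology[i].mask; i++)
            ;
    /* The new array therefore needs i default levels, nr_levels NUMA levels,
     * and one extra zeroed entry as the terminator: i + nr_levels + 1. */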
Signed-off-by: Dietmar Eggemann <dietmar.eggemann@arm.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Tested-by: Vincent Guittot <vincent.guittot@linaro.org>
Tested-by: Barry Song <song.bao.hua@hisilicon.com>
Link: https://lkml.kernel.org/r/6000e39e-7d28-c360-9cd6-8798fd22a9bf@arm.com
Fixes: 620a6dc40754 ("sched/topology: Make sched_init_numa() use a set for the deduplicating sort")
Signed-off-by: Jialin Zhang <zhangjialin11@huawei.com>
Reviewed-by: Cheng Jian <cj.chengjian@huawei.com>
Signed-off-by: Chen Jun <chenjun102@huawei.com>
---
 kernel/sched/topology.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c index 8bb65e09ed06..84e173e1cc0f 100644 --- a/kernel/sched/topology.c +++ b/kernel/sched/topology.c @@ -1775,7 +1775,7 @@ void sched_init_numa(void) /* Compute default topology size */ for (i = 0; sched_domain_topology[i].mask; i++);
- tl = kzalloc((i + nr_levels) * + tl = kzalloc((i + nr_levels + 1) * sizeof(struct sched_domain_topology_level), GFP_KERNEL); if (!tl) return;
From: Thelford Williams <tdwilliamsiv@gmail.com>
maillist inclusion
category: bugfix
bugzilla: 182978 https://gitee.com/openeuler/kernel/issues/I4DDEL
CVE: CVE-2021-42327
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
---------------------------
Size can be any value and is user-controlled, resulting in the 40-byte array wr_buf being overwritten with an arbitrary length of data from buf.
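A condensed view of the fix (paraphrased from the hunk below; surrounding declarations omitted): the parser copies user data into wr_buf, so its bound must be the destination buffer's size, not the user-supplied write size.

    /* before: 'size' comes straight from userspace and can exceed wr_buf */
    parse_write_buffer_into_params(wr_buf, size, (long *)param, buf,
                                   max_param_num, &param_nums);

    /* after: the copy is clamped to the 40-byte wr_buf allocation */
    parse_write_buffer_into_params(wr_buf, wr_buf_size, (long *)param, buf,
                                   max_param_num, &param_nums);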
Signed-off-by: Thelford Williams <tdwilliamsiv@gmail.com>
Signed-off-by: Alex Deucher <alexander.deucher@amd.com>
Signed-off-by: Chen Jun <chenjun102@huawei.com>
Reviewed-by: weiyang wang <wangweiyang2@huawei.com>
Signed-off-by: Chen Jun <chenjun102@huawei.com>
---
 drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c index fbb65c95464b..e43f82bcb231 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_debugfs.c @@ -264,7 +264,7 @@ static ssize_t dp_link_settings_write(struct file *f, const char __user *buf, if (!wr_buf) return -ENOSPC;
- if (parse_write_buffer_into_params(wr_buf, size, + if (parse_write_buffer_into_params(wr_buf, wr_buf_size, (long *)param, buf, max_param_num, ¶m_nums)) {