Add a UCE kernel recovery path for copy_from_user(). When a synchronous
external abort caused by an uncorrected error is taken inside
__arch_copy_from_user() and the corresponding recovery scenario is enabled,
execution is diverted to copy_from_user_sea_fallback(), which disables
uaccess and reports the copy as failed, instead of do_sea() calling die().
The kernel_access_sea_recovery sysctl maximum is raised from 3 to 7 to
cover the new scenario bit.
Signed-off-by: Tong Tiangen <tongtiangen@huawei.com>
---
 arch/arm64/include/asm/exception.h |  1 +
 arch/arm64/lib/copy_from_user.S    |  6 ++++++
 arch/arm64/mm/fault.c              | 17 +++++++++++++++--
 kernel/sysctl.c                    |  3 ++-
 4 files changed, 24 insertions(+), 3 deletions(-)
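Note for reviewers: a minimal sketch of the caller-visible effect. The
struct and function names below are hypothetical and not part of this
patch; the point is that callers which already treat a nonzero return from
copy_from_user() as a failure pick up the recovery behaviour unchanged.

/*
 * Illustration only, not part of the patch.  When a UCE is consumed inside
 * __arch_copy_from_user() and the corresponding recovery bit is set,
 * copy_from_user_sea_fallback() makes copy_from_user() report the copy as
 * failed, so the existing -EFAULT path below runs instead of do_sea()
 * calling die().
 */
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/errno.h>

struct example_config {		/* hypothetical payload */
	u32 flags;
	u64 addr;
};

static long example_set_config(struct example_config *cfg,
			       const void __user *uarg)
{
	if (copy_from_user(cfg, uarg, sizeof(*cfg)))
		return -EFAULT;		/* UCE now surfaces here */

	return 0;
}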
diff --git a/arch/arm64/include/asm/exception.h b/arch/arm64/include/asm/exception.h
index 559d86ad9e5d..d0c8a1fda453 100644
--- a/arch/arm64/include/asm/exception.h
+++ b/arch/arm64/include/asm/exception.h
@@ -51,6 +51,7 @@ struct uce_kernel_recovery_info {
 
 extern int copy_page_cow_sea_fallback(void);
 extern int copy_generic_read_sea_fallback(void);
+extern int copy_from_user_sea_fallback(void);
 #endif
 
 #endif	/* __ASM_EXCEPTION_H */
diff --git a/arch/arm64/lib/copy_from_user.S b/arch/arm64/lib/copy_from_user.S
index 7cd6eeaa216c..d1afb61df158 100644
--- a/arch/arm64/lib/copy_from_user.S
+++ b/arch/arm64/lib/copy_from_user.S
@@ -72,6 +72,12 @@ ENTRY(__arch_copy_from_user)
 	uaccess_disable_not_uao x3, x4
 	mov	x0, #0				// Nothing to copy
 	ret
+
+	.global copy_from_user_sea_fallback
+copy_from_user_sea_fallback:
+	uaccess_disable_not_uao x3, x4
+	mov	x0, #-1
+	ret
 ENDPROC(__arch_copy_from_user)
 
 	.section .fixup,"ax"
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index 08040fe73199..50e37f4097cc 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -660,9 +660,18 @@ static int do_bad(unsigned long addr, unsigned int esr, struct pt_regs *regs)
 int kernel_access_sea_recovery;
 
 #define UCE_KER_REC_NUM ARRAY_SIZE(reco_info)
+/*
+ * Each entry corresponds to one recovery scenario, and each scenario's switch
+ * is controlled by the corresponding bit of kernel_access_sea_recovery (the
+ * first entry corresponds to bit 0, the second to bit 1, and so on).  The
+ * switch is visible to user space, so the order of the entries here must not
+ * be changed casually.  The maximum number of entries is limited by the type
+ * of the variable kernel_access_sea_recovery.
+ */
 static struct uce_kernel_recovery_info reco_info[] = {
 	{copy_page_cow_sea_fallback, "copy_page_cow", (unsigned long)copy_page_cow, 0},
 	{copy_generic_read_sea_fallback, "__arch_copy_to_user_generic_read", (unsigned long)__arch_copy_to_user_generic_read, 0},
+	{copy_from_user_sea_fallback, "__arch_copy_from_user", (unsigned long)__arch_copy_from_user, 0},
 };
 
 static int __init kernel_access_sea_recovery_init(void)
@@ -769,6 +778,9 @@ static int is_in_kernel_recovery(unsigned int esr, struct pt_regs *regs)
 	}
 
 	for (i = 0; i < UCE_KER_REC_NUM; i++) {
+		if (!((kernel_access_sea_recovery >> i) & 0x1))
+			continue;
+
 		info = &reco_info[i];
 		if (info->fn && regs->pc >= info->addr &&
 		    regs->pc < (info->addr + info->size)) {
@@ -777,7 +789,8 @@ static int is_in_kernel_recovery(unsigned int esr, struct pt_regs *regs)
 		}
 	}
 
-	pr_emerg("UCE: symbol is not match.\n");
+	pr_emerg("UCE: symbol does not match or switch is off, kernel recovery %d.\n",
+		 kernel_access_sea_recovery);
 	return -EINVAL;
 }
 #endif
@@ -847,7 +860,7 @@ static int do_sea(unsigned long addr, unsigned int esr, struct pt_regs *regs)
 				"Uncorrected hardware memory use with kernel recovery in kernel-access\n",
 				current);
 		} else {
-			die("Uncorrected hardware memory error (kernel recovery on but not match idx) in kernel-access\n",
+			die("Uncorrected hardware memory error (idx not matched or scenario switch is off) in kernel-access\n",
 			    regs, esr);
 		}
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 35512e2ea8a3..702376d7a796 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -130,6 +130,7 @@ static int __maybe_unused two = 2;
 static int __maybe_unused three = 3;
 static int __maybe_unused four = 4;
 static int __maybe_unused five = 5;
+static int __maybe_unused seven = 7;
 static unsigned long zero_ul;
 static unsigned long one_ul = 1;
 static unsigned long long_max = LONG_MAX;
@@ -1280,7 +1281,7 @@ static struct ctl_table kern_table[] = {
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec_minmax,
 		.extra1		= &zero,
-		.extra2		= &three,
+		.extra2		= &seven,
 	},
 #endif
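
For reference, a sketch of how the recovery scenarios map to bits of
kernel_access_sea_recovery after this patch, and why the sysctl upper bound
moves from 3 to 7. The sea_scenario_enabled() helper is illustrative only;
the real check is the one added to is_in_kernel_recovery() above.

/*
 * Illustration only: scenario-to-bit mapping after this patch.
 *
 *   reco_info[0]  copy_page_cow                     -> bit 0 (value 1)
 *   reco_info[1]  __arch_copy_to_user_generic_read  -> bit 1 (value 2)
 *   reco_info[2]  __arch_copy_from_user (new)       -> bit 2 (value 4)
 *
 * All three scenarios enabled => 0b111 = 7, hence the new sysctl maximum.
 */
#include <linux/types.h>

extern int kernel_access_sea_recovery;

/* Hypothetical helper mirroring the bit test added to is_in_kernel_recovery(). */
static inline bool sea_scenario_enabled(unsigned int idx)
{
	return (kernel_access_sea_recovery >> idx) & 0x1;
}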