From: Tong Tiangen <tongtiangen@huawei.com>
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I5GB28
CVE: NA
-------------------------------
In CoW (copy-on-write) processing, the data of the user process is copied. When a hardware memory error is encountered during that copy, only the relevant process is affected, so killing the user process and isolating the user page with the hardware memory error is a more reasonable choice than a kernel panic.
Add a new helper, copy_page_mc(), which provides a machine-check-safe page copy implementation. At present it is only used in CoW; in the future it can be extended to more scenarios. As long as the consequences of a page copy failure are not fatal (e.g. only a user process is affected), this helper can be used.
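Call sites do not need to be conditional on the architecture: on architectures that do not select CONFIG_ARCH_HAS_COPY_MC, the generic wrappers added to include/linux/highmem.h below simply alias the _mc helpers to their ordinary counterparts, e.g.:

	#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE_MC
	#define copy_user_highpage_mc copy_user_highpage
	#endif

so mm/memory.c can call copy_user_highpage_mc() unconditionally.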
copy_page_mc() in copy_page_mc.S largely borrows from copy_page() in copy_page.S; the main difference is that copy_page_mc() adds an extable entry to every load/store instruction to support machine check safety. Optimizations were left out largely to keep the patch simple; if needed they can be folded in later.
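Concretely, a use such as CPY_MC(9998f, ldp x2, x3, [x1]) places a local label on the protected instruction and records an extable entry whose fixup target is the given label; per the CPY_MC definition in the assembler.h hunk below it expands to roughly:

	9999:	ldp	x2, x3, [x1]
		_asm_mc_extable	9999b, 9998f

so a machine check on any covered load/store branches to the 9998 label (the final ret in copy_page_mc) and the copy is abandoned rather than panicking the kernel.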
Signed-off-by: Tong Tiangen <tongtiangen@huawei.com>
---
 arch/arm64/include/asm/assembler.h |  5 ++
 arch/arm64/include/asm/mte.h       |  4 ++
 arch/arm64/include/asm/page.h      | 10 ++++
 arch/arm64/lib/Makefile            |  2 +
 arch/arm64/lib/copy_page_mc.S      | 80 ++++++++++++++++++++++++++++++
 arch/arm64/lib/mte.S               | 19 +++++++
 arch/arm64/mm/copypage.c           | 43 +++++++++++++---
 include/linux/highmem.h            |  8 +++
 mm/memory.c                        |  2 +-
 9 files changed, 165 insertions(+), 8 deletions(-)
 create mode 100644 arch/arm64/lib/copy_page_mc.S
diff --git a/arch/arm64/include/asm/assembler.h b/arch/arm64/include/asm/assembler.h
index 68fb54519d67..5e6bacda05d8 100644
--- a/arch/arm64/include/asm/assembler.h
+++ b/arch/arm64/include/asm/assembler.h
@@ -168,6 +168,11 @@ alternative_endif
 #define USER_MC(l, x...)			\
 9999:	x;					\
 	_asm_mc_extable	9999b, l
+
+#define CPY_MC(l, x...)				\
+9999:	x;					\
+	_asm_mc_extable	9999b, l
+
 /*
  * Register aliases.
  */
diff --git a/arch/arm64/include/asm/mte.h b/arch/arm64/include/asm/mte.h
index 1c99fcadb58c..eeeb74fa2206 100644
--- a/arch/arm64/include/asm/mte.h
+++ b/arch/arm64/include/asm/mte.h
@@ -37,6 +37,7 @@ void mte_free_tag_storage(char *storage);
 void mte_sync_tags(pte_t *ptep, pte_t pte);
 void mte_copy_page_tags(void *kto, const void *kfrom);
+void mte_copy_page_tags_mc(void *kto, const void *kfrom);
 void flush_mte_state(void);
 void mte_thread_switch(struct task_struct *next);
 void mte_suspend_exit(void);
@@ -56,6 +57,9 @@ static inline void mte_sync_tags(pte_t *ptep, pte_t pte)
 static inline void mte_copy_page_tags(void *kto, const void *kfrom)
 {
 }
+static inline void mte_copy_page_tags_mc(void *kto, const void *kfrom)
+{
+}
 static inline void flush_mte_state(void)
 {
 }
diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h
index 012cffc574e8..4d3ba27b96cb 100644
--- a/arch/arm64/include/asm/page.h
+++ b/arch/arm64/include/asm/page.h
@@ -28,6 +28,16 @@ void copy_user_highpage(struct page *to, struct page *from,
 void copy_highpage(struct page *to, struct page *from);
 #define __HAVE_ARCH_COPY_HIGHPAGE
 
+#ifdef CONFIG_ARCH_HAS_COPY_MC
+extern void copy_page_mc(void *to, const void *from);
+void copy_highpage_mc(struct page *to, struct page *from);
+#define __HAVE_ARCH_COPY_HIGHPAGE_MC
+
+void copy_user_highpage_mc(struct page *to, struct page *from,
+			unsigned long vaddr, struct vm_area_struct *vma);
+#define __HAVE_ARCH_COPY_USER_HIGHPAGE_MC
+#endif
+
 #define __alloc_zeroed_user_highpage(movableflags, vma, vaddr) \
 	alloc_page_vma(GFP_HIGHUSER | __GFP_ZERO | movableflags, vma, vaddr)
 #define __HAVE_ARCH_ALLOC_ZEROED_USER_HIGHPAGE
diff --git a/arch/arm64/lib/Makefile b/arch/arm64/lib/Makefile
index d31e1169d9b8..d35d99771016 100644
--- a/arch/arm64/lib/Makefile
+++ b/arch/arm64/lib/Makefile
@@ -13,6 +13,8 @@ endif
 
 lib-$(CONFIG_ARCH_HAS_UACCESS_FLUSHCACHE) += uaccess_flushcache.o
 
+lib-$(CONFIG_ARCH_HAS_COPY_MC) += copy_page_mc.o
+
 obj-$(CONFIG_CRC32) += crc32.o
 
 obj-$(CONFIG_FUNCTION_ERROR_INJECTION) += error-inject.o
diff --git a/arch/arm64/lib/copy_page_mc.S b/arch/arm64/lib/copy_page_mc.S
new file mode 100644
index 000000000000..8d4b9159fa8a
--- /dev/null
+++ b/arch/arm64/lib/copy_page_mc.S
@@ -0,0 +1,80 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (C) 2012 ARM Ltd.
+ */
+
+#include <linux/linkage.h>
+#include <linux/const.h>
+#include <asm/assembler.h>
+#include <asm/page.h>
+#include <asm/cpufeature.h>
+#include <asm/alternative.h>
+
+/*
+ * Copy a page from src to dest (both are page aligned) with machine check
+ *
+ * Parameters:
+ *	x0 - dest
+ *	x1 - src
+ */
+SYM_FUNC_START(copy_page_mc)
+alternative_if ARM64_HAS_NO_HW_PREFETCH
+	// Prefetch three cache lines ahead.
+	prfm	pldl1strm, [x1, #128]
+	prfm	pldl1strm, [x1, #256]
+	prfm	pldl1strm, [x1, #384]
+alternative_else_nop_endif
+
+CPY_MC(9998f, ldp x2, x3, [x1])
+CPY_MC(9998f, ldp x4, x5, [x1, #16])
+CPY_MC(9998f, ldp x6, x7, [x1, #32])
+CPY_MC(9998f, ldp x8, x9, [x1, #48])
+CPY_MC(9998f, ldp x10, x11, [x1, #64])
+CPY_MC(9998f, ldp x12, x13, [x1, #80])
+CPY_MC(9998f, ldp x14, x15, [x1, #96])
+CPY_MC(9998f, ldp x16, x17, [x1, #112])
+
+	add	x0, x0, #256
+	add	x1, x1, #128
+1:
+	tst	x0, #(PAGE_SIZE - 1)
+
+alternative_if ARM64_HAS_NO_HW_PREFETCH
+	prfm	pldl1strm, [x1, #384]
+alternative_else_nop_endif
+
+CPY_MC(9998f, stnp x2, x3, [x0, #-256])
+CPY_MC(9998f, ldp x2, x3, [x1])
+CPY_MC(9998f, stnp x4, x5, [x0, #16 - 256])
+CPY_MC(9998f, ldp x4, x5, [x1, #16])
+CPY_MC(9998f, stnp x6, x7, [x0, #32 - 256])
+CPY_MC(9998f, ldp x6, x7, [x1, #32])
+CPY_MC(9998f, stnp x8, x9, [x0, #48 - 256])
+CPY_MC(9998f, ldp x8, x9, [x1, #48])
+CPY_MC(9998f, stnp x10, x11, [x0, #64 - 256])
+CPY_MC(9998f, ldp x10, x11, [x1, #64])
+CPY_MC(9998f, stnp x12, x13, [x0, #80 - 256])
+CPY_MC(9998f, ldp x12, x13, [x1, #80])
+CPY_MC(9998f, stnp x14, x15, [x0, #96 - 256])
+CPY_MC(9998f, ldp x14, x15, [x1, #96])
+CPY_MC(9998f, stnp x16, x17, [x0, #112 - 256])
+CPY_MC(9998f, ldp x16, x17, [x1, #112])
+
+	add	x0, x0, #128
+	add	x1, x1, #128
+
+	b.ne	1b
+
+CPY_MC(9998f, stnp x2, x3, [x0, #-256])
+CPY_MC(9998f, stnp x4, x5, [x0, #16 - 256])
+CPY_MC(9998f, stnp x6, x7, [x0, #32 - 256])
+CPY_MC(9998f, stnp x8, x9, [x0, #48 - 256])
+CPY_MC(9998f, stnp x10, x11, [x0, #64 - 256])
+CPY_MC(9998f, stnp x12, x13, [x0, #80 - 256])
+CPY_MC(9998f, stnp x14, x15, [x0, #96 - 256])
+CPY_MC(9998f, stnp x16, x17, [x0, #112 - 256])
+
+9998:	ret
+
+SYM_FUNC_END(copy_page_mc)
+EXPORT_SYMBOL(copy_page_mc)
diff --git a/arch/arm64/lib/mte.S b/arch/arm64/lib/mte.S
index 351537c12f36..d19fb07e1272 100644
--- a/arch/arm64/lib/mte.S
+++ b/arch/arm64/lib/mte.S
@@ -54,6 +54,25 @@ SYM_FUNC_START(mte_copy_page_tags)
 	ret
 SYM_FUNC_END(mte_copy_page_tags)
 
+/*
+ * Copy the tags from the source page to the destination one with machine check safety
+ * x0 - address of the destination page
+ * x1 - address of the source page
+ */
+SYM_FUNC_START(mte_copy_page_tags_mc)
+	mov	x2, x0
+	mov	x3, x1
+	multitag_transfer_size x5, x6
+1:
+CPY_MC(2f, ldgm x4, [x3])
+	stgm	x4, [x2]
+	add	x2, x2, x5
+	add	x3, x3, x5
+	tst	x2, #(PAGE_SIZE - 1)
+	b.ne	1b
+2:	ret
+SYM_FUNC_END(mte_copy_page_tags_mc)
+
 /*
  * Read tags from a user buffer (one tag per byte) and set the corresponding
  * tags at the given kernel address. Used by PTRACE_POKEMTETAGS.
diff --git a/arch/arm64/mm/copypage.c b/arch/arm64/mm/copypage.c
index 70a71f38b6a9..b91ba89afe25 100644
--- a/arch/arm64/mm/copypage.c
+++ b/arch/arm64/mm/copypage.c
@@ -14,20 +14,29 @@
 #include <asm/cpufeature.h>
 #include <asm/mte.h>
 
-void copy_highpage(struct page *to, struct page *from)
+static void do_mte(struct page *to, struct page *from, void *kto, void *kfrom, bool mc)
 {
-	struct page *kto = page_address(to);
-	struct page *kfrom = page_address(from);
-
-	copy_page(kto, kfrom);
-
 	if (system_supports_mte() && test_bit(PG_mte_tagged, &from->flags)) {
 		set_bit(PG_mte_tagged, &to->flags);
-		mte_copy_page_tags(kto, kfrom);
+		if (mc)
+			mte_copy_page_tags_mc(kto, kfrom);
+		else
+			mte_copy_page_tags(kto, kfrom);
 	}
 }
+
+
+void copy_highpage(struct page *to, struct page *from)
+{
+	void *kto = page_address(to);
+	void *kfrom = page_address(from);
+
+	copy_page(kto, kfrom);
+	do_mte(to, from, kto, kfrom, false);
+}
 EXPORT_SYMBOL(copy_highpage);
+
 void copy_user_highpage(struct page *to, struct page *from,
 			unsigned long vaddr, struct vm_area_struct *vma)
 {
@@ -35,3 +44,23 @@ void copy_user_highpage(struct page *to, struct page *from,
 	flush_dcache_page(to);
 }
 EXPORT_SYMBOL_GPL(copy_user_highpage);
+
+#ifdef CONFIG_ARCH_HAS_COPY_MC
+void copy_highpage_mc(struct page *to, struct page *from)
+{
+	void *kto = page_address(to);
+	void *kfrom = page_address(from);
+
+	copy_page_mc(kto, kfrom);
+	do_mte(to, from, kto, kfrom, true);
+}
+EXPORT_SYMBOL(copy_highpage_mc);
+
+void copy_user_highpage_mc(struct page *to, struct page *from,
+			unsigned long vaddr, struct vm_area_struct *vma)
+{
+	copy_highpage_mc(to, from);
+	flush_dcache_page(to);
+}
+EXPORT_SYMBOL_GPL(copy_user_highpage_mc);
+#endif
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 6b27af8fe624..c3b75b4a8fc1 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -341,6 +341,10 @@ static inline void copy_user_highpage(struct page *to, struct page *from,
 
 #endif
 
+#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE_MC
+#define copy_user_highpage_mc copy_user_highpage
+#endif
+
 #ifndef __HAVE_ARCH_COPY_HIGHPAGE
 
 static inline void copy_highpage(struct page *to, struct page *from)
@@ -356,6 +360,10 @@ static inline void copy_highpage(struct page *to, struct page *from)
 
 #endif
 
+#ifndef __HAVE_ARCH_COPY_HIGHPAGE_MC
+#define copy_highpage_mc copy_highpage
+#endif
+
 #ifndef __HAVE_ARCH_COPY_HUGEPAGES
 
 static inline void copy_highpages(struct page *to, struct page *from, int nr_pages)
diff --git a/mm/memory.c b/mm/memory.c
index 3667ec456ace..e69f47d25084 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2653,7 +2653,7 @@ static inline bool cow_user_page(struct page *dst, struct page *src,
 	unsigned long addr = vmf->address;
 
 	if (likely(src)) {
-		copy_user_highpage(dst, src, addr, vma);
+		copy_user_highpage_mc(dst, src, addr, vma);
 		return true;
 	}