From: Tong Tiangen tongtiangen@huawei.com
hulk inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/IB0OV7
--------------------------------
There is a mechanism in the kernel to recover from uncorrectable memory errors, ARCH_HAS_COPY_MC (e.g., Machine Check Safe Memory Copy on x86), which is already used in NVDIMM and core-mm paths (e.g., CoW, khugepaged, coredump, ksm copy), see copy_mc_to_{user,kernel}, copy_[user]_highpage_mc callers.
These two interfaces should return a value so that callers can check whether the copy succeeded. In the current implementation, copy_[user]_highpage_mc does not have a return value.
This patch adds return value for copy_[user]_highpage_mc.
Signed-off-by: Tong Tiangen tongtiangen@huawei.com --- arch/arm64/include/asm/page.h | 6 +++--- arch/arm64/lib/copy_page_mc.S | 6 +++++- arch/arm64/mm/copypage.c | 21 +++++++++++++++------ include/linux/highmem.h | 13 +++++++++++-- mm/memory.c | 20 ++++++++++++-------- 5 files changed, 46 insertions(+), 20 deletions(-)
diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h index 09b898a3e57c..a616a85cdb93 100644 --- a/arch/arm64/include/asm/page.h +++ b/arch/arm64/include/asm/page.h @@ -29,12 +29,12 @@ void copy_highpage(struct page *to, struct page *from); #define __HAVE_ARCH_COPY_HIGHPAGE
#ifdef CONFIG_ARCH_HAS_COPY_MC -extern void copy_page_mc(void *to, const void *from); -void copy_highpage_mc(struct page *to, struct page *from); +extern int copy_page_mc(void *to, const void *from); +int copy_highpage_mc(struct page *to, struct page *from); int copy_mc_highpage(struct page *to, struct page *from); #define __HAVE_ARCH_COPY_HIGHPAGE_MC
-void copy_user_highpage_mc(struct page *to, struct page *from, +int copy_user_highpage_mc(struct page *to, struct page *from, unsigned long vaddr, struct vm_area_struct *vma); #define __HAVE_ARCH_COPY_USER_HIGHPAGE_MC #endif diff --git a/arch/arm64/lib/copy_page_mc.S b/arch/arm64/lib/copy_page_mc.S index 8d4b9159fa8a..697d11f5a30a 100644 --- a/arch/arm64/lib/copy_page_mc.S +++ b/arch/arm64/lib/copy_page_mc.S @@ -74,7 +74,11 @@ CPY_MC(9998f, stnp x12, x13, [x0, #80 - 256]) CPY_MC(9998f, stnp x14, x15, [x0, #96 - 256]) CPY_MC(9998f, stnp x16, x17, [x0, #112 - 256])
-9998: ret + mov x0, #0 + ret + +9998: mov x0, #-EFAULT + ret
SYM_FUNC_END(copy_page_mc) EXPORT_SYMBOL(copy_page_mc) diff --git a/arch/arm64/mm/copypage.c b/arch/arm64/mm/copypage.c index 0696820d72ab..51d46ac4475e 100644 --- a/arch/arm64/mm/copypage.c +++ b/arch/arm64/mm/copypage.c @@ -44,21 +44,30 @@ void copy_user_highpage(struct page *to, struct page *from, EXPORT_SYMBOL_GPL(copy_user_highpage);
#ifdef CONFIG_ARCH_HAS_COPY_MC -void copy_highpage_mc(struct page *to, struct page *from) +int copy_highpage_mc(struct page *to, struct page *from) { void *kto = page_address(to); void *kfrom = page_address(from); + int ret; + + ret = copy_page_mc(kto, kfrom); + if (!ret) + do_mte(to, from, kto, kfrom, true);
- copy_page_mc(kto, kfrom); - do_mte(to, from, kto, kfrom, true); + return ret; } EXPORT_SYMBOL(copy_highpage_mc);
-void copy_user_highpage_mc(struct page *to, struct page *from, +int copy_user_highpage_mc(struct page *to, struct page *from, unsigned long vaddr, struct vm_area_struct *vma) { - copy_highpage_mc(to, from); - flush_dcache_page(to); + int ret; + + ret = copy_highpage_mc(to, from); + if (!ret) + flush_dcache_page(to); + + return ret; } EXPORT_SYMBOL_GPL(copy_user_highpage_mc);
diff --git a/include/linux/highmem.h b/include/linux/highmem.h index ebfee2b672d3..336781ec8453 100644 --- a/include/linux/highmem.h +++ b/include/linux/highmem.h @@ -342,7 +342,12 @@ static inline void copy_user_highpage(struct page *to, struct page *from, #endif
#ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE_MC -#define copy_user_highpage_mc copy_user_highpage +static inline int copy_user_highpage_mc(struct page *to, struct page *from, + unsigned long vaddr, struct vm_area_struct *vma) +{ + copy_user_highpage(to, from, vaddr, vma); + return 0; +} #endif
#ifndef __HAVE_ARCH_COPY_HIGHPAGE @@ -361,7 +366,11 @@ static inline void copy_highpage(struct page *to, struct page *from) #endif
#ifndef __HAVE_ARCH_COPY_HIGHPAGE_MC -#define copy_highpage_mc copy_highpage +static inline int copy_highpage_mc(struct page *to, struct page *from) +{ + copy_highpage(to, from); + return 0; +} #endif
#ifndef __HAVE_ARCH_COPY_HUGEPAGES diff --git a/mm/memory.c b/mm/memory.c index af9cb48630bd..0b71917f87c0 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -2672,10 +2672,10 @@ static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd, return same; }
-static inline bool cow_user_page(struct page *dst, struct page *src, +static inline int cow_user_page(struct page *dst, struct page *src, struct vm_fault *vmf) { - bool ret; + int ret; void *kaddr; void __user *uaddr; bool locked = false; @@ -2684,7 +2684,8 @@ static inline bool cow_user_page(struct page *dst, struct page *src, unsigned long addr = vmf->address;
if (likely(src)) { - copy_user_highpage_mc(dst, src, addr, vma); + if (copy_user_highpage_mc(dst, src, addr, vma)) + return -EHWPOISON; - return true; + return 0; }
@@ -2712,7 +2713,7 @@ static inline bool cow_user_page(struct page *dst, struct page *src, * and update local tlb only */ update_mmu_tlb(vma, addr, vmf->pte); - ret = false; + ret = -EAGAIN; goto pte_unlock; }
@@ -2737,7 +2738,7 @@ static inline bool cow_user_page(struct page *dst, struct page *src, if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) { /* The PTE changed under us, update local tlb */ update_mmu_tlb(vma, addr, vmf->pte); - ret = false; + ret = -EAGAIN; goto pte_unlock; }
@@ -2756,7 +2757,7 @@ static inline bool cow_user_page(struct page *dst, struct page *src, } }
- ret = true; + ret = 0;
pte_unlock: if (locked) @@ -2932,12 +2933,15 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf) if (!new_page) goto oom; } else { + int err; + new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, vmf->address); if (!new_page) goto oom;
- if (!cow_user_page(new_page, old_page, vmf)) { + err = cow_user_page(new_page, old_page, vmf); + if (err) { /* * COW failed, if the fault was solved by other, * it's fine. If not, userspace would re-fault on @@ -2947,7 +2951,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf) put_page(new_page); if (old_page) put_page(old_page); - return 0; + return err == -EHWPOISON ? VM_FAULT_HWPOISON : 0; } }