From: Ma Wupeng mawupeng1@huawei.com
Add machine check safe support for CoW and migrate_pages(). Avoid a kernel panic in arm64_do_kernel_sea() when no valid error information is reported.
Changelog since v1:
- fix incorrect patch format
Kefeng Wang (2):
  mm: support poison recovery from copy_present_page()
  mm: support poison recovery from do_cow_fault()

Liu Shixin (1):
  mm: hwpoison: support recovery from HugePage copy-on-write faults

Ma Wupeng (2):
  arm64: mm: Add copy mc support for all migrate_page
  arm64: send sig fault for user task when apei_claim_sea fails

Tong Tiangen (1):
  make copy_[user]_highpage_mc have return value
 arch/arm64/include/asm/page.h |  6 +--
 arch/arm64/lib/copy_page_mc.S |  6 ++-
 arch/arm64/mm/copypage.c      | 21 +++++---
 arch/arm64/mm/fault.c         | 17 +++++--
 include/linux/highmem.h       | 13 ++++-
 include/linux/mm.h            |  8 ++--
 mm/hugetlb.c                  |  7 ++-
 mm/memory.c                   | 90 +++++++++++++++++++++++------------
 mm/migrate.c                  |  5 +-
 9 files changed, 118 insertions(+), 55 deletions(-)
From: Tong Tiangen tongtiangen@huawei.com
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/IB0OV7
--------------------------------
There is a mechanism in the kernel to recover from uncorrectable memory errors, ARCH_HAS_COPY_MC (e.g. Machine Check Safe Memory Copy on x86), which is already used in NVDIMM and core-mm paths (e.g. CoW, khugepaged, coredump, ksm copy); see the copy_mc_to_{user,kernel} and copy_[user]_highpage_mc callers.
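As a point of reference, the underlying primitive is consumed like the sketch below. This is illustrative only: mc_safe_read() is a made-up helper, but copy_mc_to_kernel() is the real interface and returns the number of bytes left uncopied when poison is consumed.

  #include <linux/uaccess.h>      /* copy_mc_to_kernel() */
  #include <linux/errno.h>

  /* Illustrative only: return an error instead of crashing when the
   * source triggers an uncorrectable memory error during the copy. */
  static int mc_safe_read(void *dst, const void *src, size_t len)
  {
          unsigned long rem = copy_mc_to_kernel(dst, src, len);

          return rem ? -EIO : 0;
  }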
These two helpers, copy_highpage_mc and copy_user_highpage_mc, should return a value so that callers can check whether the copy succeeded. In the current implementation they have no return value.

This patch adds a return value to copy_[user]_highpage_mc.
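For illustration, the calling convention the rest of the series relies on looks roughly like this; cow_copy_example() is a made-up wrapper, the real call-site update is in the mm/memory.c hunk below.

  #include <linux/highmem.h>
  #include <linux/mm.h>

  /* Illustrative wrapper: a non-zero return from copy_user_highpage_mc()
   * means the source page is poisoned, so the fault is reported to the
   * caller instead of letting the kernel consume the error. */
  static vm_fault_t cow_copy_example(struct page *dst, struct page *src,
                                     unsigned long addr,
                                     struct vm_area_struct *vma)
  {
          if (copy_user_highpage_mc(dst, src, addr, vma))
                  return VM_FAULT_HWPOISON;

          return 0;
  }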
Signed-off-by: Tong Tiangen tongtiangen@huawei.com
---
 arch/arm64/include/asm/page.h |  6 +++---
 arch/arm64/lib/copy_page_mc.S |  6 +++++-
 arch/arm64/mm/copypage.c      | 21 +++++++++++++++------
 include/linux/highmem.h       | 13 +++++++++++--
 mm/memory.c                   | 20 ++++++++++++--------
 5 files changed, 46 insertions(+), 20 deletions(-)
diff --git a/arch/arm64/include/asm/page.h b/arch/arm64/include/asm/page.h
index 09b898a3e57c..a616a85cdb93 100644
--- a/arch/arm64/include/asm/page.h
+++ b/arch/arm64/include/asm/page.h
@@ -29,12 +29,12 @@ void copy_highpage(struct page *to, struct page *from);
 #define __HAVE_ARCH_COPY_HIGHPAGE
 
 #ifdef CONFIG_ARCH_HAS_COPY_MC
-extern void copy_page_mc(void *to, const void *from);
-void copy_highpage_mc(struct page *to, struct page *from);
+extern int copy_page_mc(void *to, const void *from);
+int copy_highpage_mc(struct page *to, struct page *from);
 int copy_mc_highpage(struct page *to, struct page *from);
 #define __HAVE_ARCH_COPY_HIGHPAGE_MC
 
-void copy_user_highpage_mc(struct page *to, struct page *from,
+int copy_user_highpage_mc(struct page *to, struct page *from,
                           unsigned long vaddr, struct vm_area_struct *vma);
 #define __HAVE_ARCH_COPY_USER_HIGHPAGE_MC
 #endif
diff --git a/arch/arm64/lib/copy_page_mc.S b/arch/arm64/lib/copy_page_mc.S
index 8d4b9159fa8a..697d11f5a30a 100644
--- a/arch/arm64/lib/copy_page_mc.S
+++ b/arch/arm64/lib/copy_page_mc.S
@@ -74,7 +74,11 @@ CPY_MC(9998f, stnp x12, x13, [x0, #80 - 256])
 CPY_MC(9998f, stnp x14, x15, [x0, #96 - 256])
 CPY_MC(9998f, stnp x16, x17, [x0, #112 - 256])
 
-9998:  ret
+       mov x0, #0
+       ret
+
+9998:  mov x0, #-EFAULT
+       ret
 
 SYM_FUNC_END(copy_page_mc)
 EXPORT_SYMBOL(copy_page_mc)
diff --git a/arch/arm64/mm/copypage.c b/arch/arm64/mm/copypage.c
index 0696820d72ab..51d46ac4475e 100644
--- a/arch/arm64/mm/copypage.c
+++ b/arch/arm64/mm/copypage.c
@@ -44,21 +44,30 @@ void copy_user_highpage(struct page *to, struct page *from,
 EXPORT_SYMBOL_GPL(copy_user_highpage);
 
 #ifdef CONFIG_ARCH_HAS_COPY_MC
-void copy_highpage_mc(struct page *to, struct page *from)
+int copy_highpage_mc(struct page *to, struct page *from)
 {
        void *kto = page_address(to);
        void *kfrom = page_address(from);
+       int ret;
+
+       ret = copy_page_mc(kto, kfrom);
+       if (!ret)
+               do_mte(to, from, kto, kfrom, true);
 
-       copy_page_mc(kto, kfrom);
-       do_mte(to, from, kto, kfrom, true);
+       return ret;
 }
 EXPORT_SYMBOL(copy_highpage_mc);
 
-void copy_user_highpage_mc(struct page *to, struct page *from,
+int copy_user_highpage_mc(struct page *to, struct page *from,
                           unsigned long vaddr, struct vm_area_struct *vma)
 {
-       copy_highpage_mc(to, from);
-       flush_dcache_page(to);
+       int ret;
+
+       ret = copy_highpage_mc(to, from);
+       if (!ret)
+               flush_dcache_page(to);
+
+       return ret;
 }
 EXPORT_SYMBOL_GPL(copy_user_highpage_mc);
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index ebfee2b672d3..336781ec8453 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -342,7 +342,12 @@ static inline void copy_user_highpage(struct page *to, struct page *from,
 #endif
 
 #ifndef __HAVE_ARCH_COPY_USER_HIGHPAGE_MC
-#define copy_user_highpage_mc copy_user_highpage
+static inline int copy_user_highpage_mc(struct page *to, struct page *from,
+                                        unsigned long vaddr, struct vm_area_struct *vma)
+{
+       copy_user_highpage(to, from, vaddr, vma);
+       return 0;
+}
 #endif
 
 #ifndef __HAVE_ARCH_COPY_HIGHPAGE
@@ -361,7 +366,11 @@ static inline void copy_highpage(struct page *to, struct page *from)
 #endif
 
 #ifndef __HAVE_ARCH_COPY_HIGHPAGE_MC
-#define copy_highpage_mc copy_highpage
+static inline int copy_highpage_mc(struct page *to, struct page *from)
+{
+       copy_highpage(to, from);
+       return 0;
+}
 #endif
 
 #ifndef __HAVE_ARCH_COPY_HUGEPAGES
diff --git a/mm/memory.c b/mm/memory.c
index af9cb48630bd..0b71917f87c0 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2672,10 +2672,10 @@ static inline int pte_unmap_same(struct mm_struct *mm, pmd_t *pmd,
        return same;
 }
 
-static inline bool cow_user_page(struct page *dst, struct page *src,
+static inline int cow_user_page(struct page *dst, struct page *src,
                                 struct vm_fault *vmf)
 {
-       bool ret;
+       int ret;
        void *kaddr;
        void __user *uaddr;
        bool locked = false;
@@ -2684,7 +2684,8 @@ static inline bool cow_user_page(struct page *dst, struct page *src,
        unsigned long addr = vmf->address;
 
        if (likely(src)) {
-               copy_user_highpage_mc(dst, src, addr, vma);
-               return true;
+               if (copy_user_highpage_mc(dst, src, addr, vma))
+                       return -EHWPOISON;
+               return 0;
        }
 
@@ -2712,7 +2713,7 @@ static inline bool cow_user_page(struct page *dst, struct page *src,
                 * and update local tlb only
                 */
                update_mmu_tlb(vma, addr, vmf->pte);
-               ret = false;
+               ret = -EAGAIN;
                goto pte_unlock;
        }
 
@@ -2737,7 +2738,7 @@ static inline bool cow_user_page(struct page *dst, struct page *src,
                if (!likely(pte_same(*vmf->pte, vmf->orig_pte))) {
                        /* The PTE changed under us, update local tlb */
                        update_mmu_tlb(vma, addr, vmf->pte);
-                       ret = false;
+                       ret = -EAGAIN;
                        goto pte_unlock;
                }
 
@@ -2756,7 +2757,7 @@ static inline bool cow_user_page(struct page *dst, struct page *src,
                }
        }
 
-       ret = true;
+       ret = 0;
 
 pte_unlock:
        if (locked)
@@ -2932,12 +2933,15 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
                if (!new_page)
                        goto oom;
        } else {
+               int err;
+
                new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma,
                                vmf->address);
                if (!new_page)
                        goto oom;
 
-               if (!cow_user_page(new_page, old_page, vmf)) {
+               err = cow_user_page(new_page, old_page, vmf);
+               if (err) {
                        /*
                         * COW failed, if the fault was solved by other,
                         * it's fine. If not, userspace would re-fault on
@@ -2947,7 +2951,7 @@ static vm_fault_t wp_page_copy(struct vm_fault *vmf)
                        put_page(new_page);
                        if (old_page)
                                put_page(old_page);
-                       return 0;
+                       return err == -EHWPOISON ? VM_FAULT_HWPOISON : 0;
                }
        }
From: Liu Shixin liushixin2@huawei.com
mainline inclusion
from mainline-v6.4-rc1
commit 1cb9dc4b475c7418f925ab0c97b6750007d9f52e
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/IB0OV7
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
--------------------------------
copy-on-write of hugetlb user pages with uncorrectable errors will result in a kernel crash. This is because the copy is performed in kernel mode and in general we can not handle accessing memory with such errors while in kernel mode. Commit a873dfe1032a ("mm, hwpoison: try to recover from copy-on write faults") introduced the routine copy_user_highpage_mc() to gracefully handle copying of user pages with uncorrectable errors. However, the separate hugetlb copy-on-write code paths were not modified as part of commit a873dfe1032a.
Modify hugetlb copy-on-write code paths to use copy_mc_user_highpage() so that they can also gracefully handle uncorrectable errors in user pages. This involves changing the hugetlb specific routine copy_user_large_folio() from type void to int so that it can return an error. Modify the hugetlb userfaultfd code in the same way so that it can return -EHWPOISON if it encounters an uncorrectable error.
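Condensed from the hunks below, the error flow ends up as sketched here. Note that in this 5.10 backport the helper keeps its old name copy_user_huge_page() rather than copy_user_large_folio(), and hugetlb_cow_copy_example() is a made-up wrapper used only for illustration.

  #include <linux/hugetlb.h>
  #include <linux/mm.h>

  /* Sketch of the propagation path: copy_user_highpage_mc() fails on a
   * poisoned sub-page, copy_subpage()/process_huge_page() stop at the
   * first failure, copy_user_huge_page() returns -EHWPOISON, and the
   * hugetlb CoW path reports VM_FAULT_HWPOISON_LARGE. */
  static vm_fault_t hugetlb_cow_copy_example(struct page *new_page,
                                             struct page *old_page,
                                             unsigned long address,
                                             struct vm_area_struct *vma,
                                             struct hstate *h)
  {
          if (copy_user_huge_page(new_page, old_page, address, vma,
                                  pages_per_huge_page(h)))
                  return VM_FAULT_HWPOISON_LARGE;

          return 0;
  }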
Link: https://lkml.kernel.org/r/20230413131349.2524210-1-liushixin2@huawei.com
Signed-off-by: Liu Shixin liushixin2@huawei.com
Acked-by: Mike Kravetz mike.kravetz@oracle.com
Reviewed-by: Naoya Horiguchi naoya.horiguchi@nec.com
Cc: Miaohe Lin linmiaohe@huawei.com
Cc: Muchun Song muchun.song@linux.dev
Cc: Tony Luck tony.luck@intel.com
Signed-off-by: Andrew Morton akpm@linux-foundation.org
Conflicts:
	include/linux/mm.h
	mm/hugetlb.c
	mm/memory.c
[Ma Wupeng: current branch don't need folio & memory_failure_queue]
Signed-off-by: Ma Wupeng mawupeng1@huawei.com
---
 include/linux/mm.h |  8 +++----
 mm/hugetlb.c       |  7 ++++--
 mm/memory.c        | 53 +++++++++++++++++++++++++++++-----------------
 3 files changed, 42 insertions(+), 26 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 627f997bc547..00bc6978391b 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -3222,10 +3222,10 @@ enum mf_action_page_type {
 extern void clear_huge_page(struct page *page,
                            unsigned long addr_hint,
                            unsigned int pages_per_huge_page);
-extern void copy_user_huge_page(struct page *dst, struct page *src,
-                               unsigned long addr_hint,
-                               struct vm_area_struct *vma,
-                               unsigned int pages_per_huge_page);
+extern int copy_user_huge_page(struct page *dst, struct page *src,
+                              unsigned long addr_hint,
+                              struct vm_area_struct *vma,
+                              unsigned int pages_per_huge_page);
 extern long copy_huge_page_from_user(struct page *dst_page,
                                const void __user *usr_src,
                                unsigned int pages_per_huge_page,
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 5f04adac38bb..4f4773bd5393 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4728,8 +4728,11 @@ static vm_fault_t hugetlb_cow(struct mm_struct *mm, struct vm_area_struct *vma,
                goto out_release_all;
        }
 
-       copy_user_huge_page(new_page, old_page, address, vma,
-                           pages_per_huge_page(h));
+       if (copy_user_huge_page(new_page, old_page, address, vma,
+                               pages_per_huge_page(h))) {
+               ret = VM_FAULT_HWPOISON_LARGE;
+               goto out_release_all;
+       }
        __SetPageUptodate(new_page);
 
        mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, vma, mm, haddr,
diff --git a/mm/memory.c b/mm/memory.c
index 0b71917f87c0..a4b8b1d47a3b 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -5287,12 +5287,12 @@ EXPORT_SYMBOL(__might_fault);
  * operation. The target subpage will be processed last to keep its
  * cache lines hot.
  */
-static inline void process_huge_page(
+static inline int process_huge_page(
        unsigned long addr_hint, unsigned int pages_per_huge_page,
-       void (*process_subpage)(unsigned long addr, int idx, void *arg),
+       int (*process_subpage)(unsigned long addr, int idx, void *arg),
        void *arg)
 {
-       int i, n, base, l;
+       int i, n, base, l, ret;
        unsigned long addr = addr_hint &
                ~(((unsigned long)pages_per_huge_page << PAGE_SHIFT) - 1);
 
@@ -5306,7 +5306,9 @@ static inline void process_huge_page(
                /* Process subpages at the end of huge page */
                for (i = pages_per_huge_page - 1; i >= 2 * n; i--) {
                        cond_resched();
-                       process_subpage(addr + i * PAGE_SIZE, i, arg);
+                       ret = process_subpage(addr + i * PAGE_SIZE, i, arg);
+                       if (ret)
+                               return ret;
                }
        } else {
                /* If target subpage in second half of huge page */
@@ -5315,7 +5317,9 @@ static inline void process_huge_page(
                /* Process subpages at the begin of huge page */
                for (i = 0; i < base; i++) {
                        cond_resched();
-                       process_subpage(addr + i * PAGE_SIZE, i, arg);
+                       ret = process_subpage(addr + i * PAGE_SIZE, i, arg);
+                       if (ret)
+                               return ret;
                }
        }
        /*
@@ -5327,10 +5331,15 @@ static inline void process_huge_page(
                int right_idx = base + 2 * l - 1 - i;
 
                cond_resched();
-               process_subpage(addr + left_idx * PAGE_SIZE, left_idx, arg);
+               ret = process_subpage(addr + left_idx * PAGE_SIZE, left_idx, arg);
+               if (ret)
+                       return ret;
                cond_resched();
-               process_subpage(addr + right_idx * PAGE_SIZE, right_idx, arg);
+               ret = process_subpage(addr + right_idx * PAGE_SIZE, right_idx, arg);
+               if (ret)
+                       return ret;
        }
+       return 0;
 }
 
 static void clear_gigantic_page(struct page *page,
@@ -5348,11 +5357,12 @@ static void clear_gigantic_page(struct page *page,
        }
 }
 
-static void clear_subpage(unsigned long addr, int idx, void *arg)
+static int clear_subpage(unsigned long addr, int idx, void *arg)
 {
        struct page *page = arg;
 
        clear_user_highpage(page + idx, addr);
+       return 0;
 }
 
 void clear_huge_page(struct page *page,
@@ -5369,7 +5379,7 @@ void clear_huge_page(struct page *page,
        process_huge_page(addr_hint, pages_per_huge_page, clear_subpage, page);
 }
 
-static void copy_user_gigantic_page(struct page *dst, struct page *src,
+static int copy_user_gigantic_page(struct page *dst, struct page *src,
                                    unsigned long addr,
                                    struct vm_area_struct *vma,
                                    unsigned int pages_per_huge_page)
@@ -5380,12 +5390,14 @@ static void copy_user_gigantic_page(struct page *dst, struct page *src,
 
        for (i = 0; i < pages_per_huge_page; ) {
                cond_resched();
-               copy_user_highpage(dst, src, addr + i*PAGE_SIZE, vma);
+               if (copy_user_highpage_mc(dst, src, addr + i*PAGE_SIZE, vma))
+                       return -EHWPOISON;
 
                i++;
                dst = mem_map_next(dst, dst_base, i);
                src = mem_map_next(src, src_base, i);
        }
+       return 0;
 }
 
 struct copy_subpage_arg {
@@ -5394,15 +5406,18 @@ struct copy_subpage_arg {
        struct vm_area_struct *vma;
 };
 
-static void copy_subpage(unsigned long addr, int idx, void *arg)
+static int copy_subpage(unsigned long addr, int idx, void *arg)
 {
        struct copy_subpage_arg *copy_arg = arg;
 
-       copy_user_highpage(copy_arg->dst + idx, copy_arg->src + idx,
-                          addr, copy_arg->vma);
+       if (copy_user_highpage_mc(copy_arg->dst + idx, copy_arg->src + idx,
+                                 addr, copy_arg->vma))
+               return -EHWPOISON;
+
+       return 0;
 }
 
-void copy_user_huge_page(struct page *dst, struct page *src,
+int copy_user_huge_page(struct page *dst, struct page *src,
                         unsigned long addr_hint, struct vm_area_struct *vma,
                         unsigned int pages_per_huge_page)
 {
@@ -5414,13 +5429,11 @@ void copy_user_huge_page(struct page *dst, struct page *src,
                .vma = vma,
        };
 
-       if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES)) {
-               copy_user_gigantic_page(dst, src, addr, vma,
-                                       pages_per_huge_page);
-               return;
-       }
+       if (unlikely(pages_per_huge_page > MAX_ORDER_NR_PAGES))
+               return copy_user_gigantic_page(dst, src, addr, vma,
+                                              pages_per_huge_page);
 
-       process_huge_page(addr_hint, pages_per_huge_page, copy_subpage, &arg);
+       return process_huge_page(addr_hint, pages_per_huge_page, copy_subpage, &arg);
 }
 
 long copy_huge_page_from_user(struct page *dst_page,
From: Kefeng Wang wangkefeng.wang@huawei.com
mainline inclusion
from mainline-v6.12-rc1
commit 658be46520ce480a44fe405730a1725166298f27
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/IB0OV7
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
--------------------------------
Similar to other poison recovery paths, use copy_mc_user_highpage() to avoid a potential kernel panic while copying a page in copy_present_page() during fork. Once the copy fails due to hwpoison in the source page, we need to break out of the copy in copy_pte_range() and release the prealloc folio, so copy_mc_user_highpage() is moved ahead of setting *prealloc to NULL.
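The intent, written out as a sketch (fork_copy_example() is a made-up wrapper; this backport keeps the copy_user_highpage_mc() name instead of copy_mc_user_highpage()):

  #include <linux/highmem.h>
  #include <linux/mm.h>

  /* Sketch: the copy now happens before the preallocated page is
   * committed (*prealloc is only cleared on success), and -EHWPOISON
   * makes copy_pte_range() stop copying the range instead of retrying
   * with a new preallocation as it does for -EAGAIN. */
  static int fork_copy_example(struct page *new_page, struct page *page,
                               unsigned long addr,
                               struct vm_area_struct *src_vma)
  {
          if (copy_user_highpage_mc(new_page, page, addr, src_vma))
                  return -EHWPOISON;      /* parent's source page is poisoned */

          return 0;                       /* caller takes over new_page */
  }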
Link: https://lkml.kernel.org/r/20240906024201.1214712-3-wangkefeng.wang@huawei.co...
Signed-off-by: Kefeng Wang wangkefeng.wang@huawei.com
Reviewed-by: Jane Chu jane.chu@oracle.com
Reviewed-by: Miaohe Lin linmiaohe@huawei.com
Cc: David Hildenbrand david@redhat.com
Cc: Jiaqi Yan jiaqiyan@google.com
Cc: Naoya Horiguchi nao.horiguchi@gmail.com
Cc: Tony Luck tony.luck@intel.com
Signed-off-by: Andrew Morton akpm@linux-foundation.org
Conflicts:
	mm/memory.c
[Ma Wupeng: copy_pte_range don't need to handle case -EBUSY]
Signed-off-by: Ma Wupeng mawupeng1@huawei.com
---
 mm/memory.c | 10 ++++++++--
 1 file changed, 8 insertions(+), 2 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index a4b8b1d47a3b..c364158a5889 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -844,8 +844,11 @@ copy_present_page(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma
         * We have a prealloc page, all good!  Take it
         * over and copy the page & arm it.
         */
+
+       if (copy_user_highpage_mc(new_page, page, addr, src_vma))
+               return -EHWPOISON;
+
        *prealloc = NULL;
-       copy_user_highpage(new_page, page, addr, src_vma);
        __SetPageUptodate(new_page);
        reliable_page_counter(new_page, dst_vma->vm_mm, 1);
        page_add_new_anon_rmap(new_page, dst_vma, addr, false);
@@ -996,8 +999,9 @@ copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
                /*
                 * If we need a pre-allocated page for this pte, drop the
                 * locks, allocate, and try again.
+                * If copy failed due to hwpoison in source page, break out.
                 */
-               if (unlikely(ret == -EAGAIN))
+               if (unlikely(ret == -EAGAIN || ret == -EHWPOISON))
                        break;
                if (unlikely(prealloc)) {
                        /*
@@ -1025,6 +1029,8 @@ copy_pte_range(struct vm_area_struct *dst_vma, struct vm_area_struct *src_vma,
                        goto out;
                }
                entry.val = 0;
+       } else if (unlikely(ret == -EHWPOISON)) {
+               goto out;
        } else if (ret) {
                WARN_ON_ONCE(ret != -EAGAIN);
                prealloc = page_copy_prealloc(src_mm, src_vma, addr);
From: Kefeng Wang wangkefeng.wang@huawei.com
mainline inclusion
from mainline-v6.12-rc1
commit aa549f923f5e037a459dcd588932db9abfa8c158
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/IB0OV7
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...
--------------------------------
Patch series "mm: hwpoison: two more poison recovery".
One more CoW path, do_cow_fault(), gains poison recovery, and the last copy_user_highpage() user, in copy_present_page() during fork, is switched to copy_mc_user_highpage() so that it supports poison recovery too.
This patch (of 2):
Like commit a873dfe1032a ("mm, hwpoison: try to recover from copy-on write faults"), there is another path that can crash because it has no recovery code where poison is consumed by the kernel: do_cow_fault(). A crash calltrace captured on an old kernel is shown below, but the same can happen with the latest mainline code.
  CPU: 7 PID: 3248 Comm: mpi Kdump: loaded Tainted: G OE 5.10.0 #1
  pc : copy_page+0xc/0xbc
  lr : copy_user_highpage+0x50/0x9c
  Call trace:
   copy_page+0xc/0xbc
   do_cow_fault+0x118/0x2bc
   do_fault+0x40/0x1a4
   handle_pte_fault+0x154/0x230
   __handle_mm_fault+0x1a8/0x38c
   handle_mm_fault+0xf0/0x250
   do_page_fault+0x184/0x454
   do_translation_fault+0xac/0xd4
   do_mem_abort+0x44/0xbc
Fix it by using copy_mc_user_highpage() to handle this case and returning VM_FAULT_HWPOISON for the CoW fault.
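After the change, the relevant step of do_cow_fault() behaves as sketched below (cow_fault_copy_example() is a made-up wrapper around the hunk that follows):

  #include <linux/highmem.h>
  #include <linux/mm.h>

  /* Sketch: a poisoned source page turns the fault into
   * VM_FAULT_HWPOISON, and vmf->page is still unlocked and released via
   * the new "unlock" label instead of being leaked. */
  static vm_fault_t cow_fault_copy_example(struct vm_fault *vmf,
                                           struct vm_area_struct *vma)
  {
          if (copy_user_highpage_mc(vmf->cow_page, vmf->page,
                                    vmf->address, vma))
                  return VM_FAULT_HWPOISON;

          __SetPageUptodate(vmf->cow_page);
          return 0;
  }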
[wangkefeng.wang@huawei.com: unlock/put vmf->page, per Miaohe]
Link: https://lkml.kernel.org/r/20240910021541.234300-1-wangkefeng.wang@huawei.com
Link: https://lkml.kernel.org/r/20240906024201.1214712-1-wangkefeng.wang@huawei.co...
Link: https://lkml.kernel.org/r/20240906024201.1214712-2-wangkefeng.wang@huawei.co...
Signed-off-by: Kefeng Wang wangkefeng.wang@huawei.com
Reviewed-by: Jane Chu jane.chu@oracle.com
Reviewed-by: Miaohe Lin linmiaohe@huawei.com
Cc: David Hildenbrand david@redhat.com
Cc: Jiaqi Yan jiaqiyan@google.com
Cc: Naoya Horiguchi nao.horiguchi@gmail.com
Cc: Tony Luck tony.luck@intel.com
Signed-off-by: Andrew Morton akpm@linux-foundation.org
Conflicts:
	mm/memory.c
[Ma Wupeng: context conflicts, 5.10 don't have folio]
Signed-off-by: Ma Wupeng mawupeng1@huawei.com
---
 mm/memory.c | 7 ++++++-
 1 file changed, 6 insertions(+), 1 deletion(-)
diff --git a/mm/memory.c b/mm/memory.c
index c364158a5889..20869d0cd5a1 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -4276,10 +4276,15 @@ static vm_fault_t do_cow_fault(struct vm_fault *vmf)
        if (ret & VM_FAULT_DONE_COW)
                return ret;
 
-       copy_user_highpage(vmf->cow_page, vmf->page, vmf->address, vma);
+       if (copy_user_highpage_mc(vmf->cow_page, vmf->page, vmf->address, vma)) {
+               ret = VM_FAULT_HWPOISON;
+               goto unlock;
+       }
+
        __SetPageUptodate(vmf->cow_page);
 
        ret |= finish_fault(vmf);
+unlock:
        unlock_page(vmf->page);
        put_page(vmf->page);
        if (unlikely(ret & (VM_FAULT_ERROR | VM_FAULT_NOPAGE | VM_FAULT_RETRY)))
From: Ma Wupeng mawupeng1@huawei.com
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/IB0OV7
--------------------------------
Commit 45dbef4c04f6 ("mm: page_eject: Add mc support during offline page") brought machine check safe support to migrate_page(); however, it is only enabled for the page eject feature, which itself is arm64-only. Since this support works in any scenario, remove the PF_MCS restriction and enable it for all migrate_page() calls on arm64.
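The resulting gate, written out as plain code for clarity (use_mc_copy_example() is a made-up helper mirroring the mm/migrate.c hunk below):

  #include <linux/kconfig.h>
  #include <linux/migrate_mode.h>

  /* Sketch: MIGRATE_SYNC_NO_COPY is still excluded because that mode does
   * not copy page contents at all, so there is nothing to protect. */
  static bool use_mc_copy_example(enum migrate_mode mode)
  {
          return IS_ENABLED(CONFIG_ARM64) &&
                 IS_ENABLED(CONFIG_ARCH_HAS_COPY_MC) &&
                 mode != MIGRATE_SYNC_NO_COPY;
  }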
Signed-off-by: Ma Wupeng mawupeng1@huawei.com
---
 mm/migrate.c | 5 ++---
 1 file changed, 2 insertions(+), 3 deletions(-)
diff --git a/mm/migrate.c b/mm/migrate.c
index cff5e11437d9..cf8c05ea821e 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -743,9 +743,8 @@ int migrate_page_extra(struct address_space *mapping,
 
        BUG_ON(PageWriteback(page));    /* Writeback must be complete */
 
-       if (unlikely(IS_ENABLED(CONFIG_ARCH_HAS_COPY_MC) &&
-                    (current->flags & PF_MCS) &&
-                    (mode != MIGRATE_SYNC_NO_COPY)))
+       if (IS_ENABLED(CONFIG_ARM64) && IS_ENABLED(CONFIG_ARCH_HAS_COPY_MC) &&
+           (mode != MIGRATE_SYNC_NO_COPY))
                return migrate_page_mc_extra(mapping, newpage, page, mode,
                                             extra_count);
From: Ma Wupeng mawupeng1@huawei.com
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/IB0OV7
--------------------------------
During arm64_do_kernel_sea(), the kernel will panic if no valid error information is reported to it, because apei_claim_sea() fails in that case. However, for a user task a sig fault can be sent instead, which avoids the kernel panic.
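The resulting order of operations is sketched below (sea_recover_example() is a made-up name; fixup_exception_mc() and apei_claim_sea() are the real arm64 helpers touched by the hunk that follows):

  #include <linux/sched.h>
  #include <linux/printk.h>
  #include <asm/ptrace.h>

  /* Sketch: apply the machine-check fixup first and remember the old pc,
   * then try to claim the SEA via APEI.  If claiming fails and there is
   * no user context to signal (kernel thread), undo the fixup and fall
   * back to the panic path; otherwise deliver the fault signal. */
  static bool sea_recover_example(struct pt_regs *regs, unsigned long addr,
                                  unsigned int esr)
  {
          unsigned long pc = regs->pc;

          if (!fixup_exception_mc(regs))
                  return false;

          if (apei_claim_sea(regs) < 0) {
                  pr_emerg("apei claim sea failed. addr: %#lx, esr: %#x\n",
                           addr, esr);
                  if (!current->mm) {
                          regs->pc = pc;
                          return false;
                  }
          }

          return true;
  }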
Signed-off-by: Ma Wupeng mawupeng1@huawei.com
---
 arch/arm64/mm/fault.c | 17 ++++++++++++++---
 1 file changed, 14 insertions(+), 3 deletions(-)
diff --git a/arch/arm64/mm/fault.c b/arch/arm64/mm/fault.c
index a2c61725c176..67f7ae98af56 100644
--- a/arch/arm64/mm/fault.c
+++ b/arch/arm64/mm/fault.c
@@ -729,6 +729,9 @@ static int do_bad(unsigned long addr, unsigned int esr, struct pt_regs *regs)
 static bool arm64_do_kernel_sea(void __user *addr, unsigned int esr,
                                struct pt_regs *regs, int sig, int code)
 {
+       unsigned long pc;
+       int err;
+
        if (!IS_ENABLED(CONFIG_ARCH_HAS_COPY_MC))
                return false;
 
@@ -738,12 +741,20 @@ static bool arm64_do_kernel_sea(void __user *addr, unsigned int esr,
        if (user_mode(regs))
                return false;
 
-       if (apei_claim_sea(regs) < 0)
-               return false;
-
+       pc = regs->pc;
        if (!fixup_exception_mc(regs))
                return false;
 
+       err = apei_claim_sea(regs);
+       if (err < 0) {
+               pr_emerg("apei claim sea failed. addr: %#lx, esr: %#x\n",
+                        (unsigned long)addr, esr);
+               if (!current->mm) {
+                       regs->pc = pc;
+                       return false;
+               }
+       }
+
        if (current->flags & PF_KTHREAD)
                return true;
Feedback: The patch(es) which you have sent to kernel@openeuler.org mailing list have been converted to a pull request successfully!
Pull request link: https://gitee.com/openeuler/kernel/pulls/13078
Mailing list address: https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/M...