[RFC PATCH openEuler-1.0-LTS 0/4] add mcs support for migrate pages
Add machine-check-safe (mc) copy support for migrating pages, and support
disabling soft offline for HugeTLB pages.

UCE kernel recovery is required for this feature; enable it with the
following step:

  - echo 1 > /proc/sys/kernel/uce_kernel_recovery

Disable soft offline support for HugeTLB pages with the following step:

  - echo 3 > /proc/sys/vm/enable_soft_offline

Jiaqi Yan (1):
  mm/memory-failure: userspace controls soft-offlining pages

Kyle Meyer (1):
  mm/memory-failure: support disabling soft offline for HugeTLB pages

Wupeng Ma (2):
  uce: add copy_mc_highpage{s}
  arm64: mm: Add copy mc support for migrate_page

 .../ABI/testing/sysfs-memory-page-offline |  3 +
 include/linux/highmem.h                    | 55 +++++++++++++
 include/linux/mm.h                         |  1 +
 kernel/sysctl.c                            |  9 +++
 mm/memory-failure.c                        | 25 +++++-
 mm/migrate.c                               | 79 ++++++++++++++++---
 6 files changed, 162 insertions(+), 10 deletions(-)

-- 
2.43.0
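As a quick sanity check that the two knobs above took effect, a minimal
user-space sketch (not part of this series; the procfs paths are the ones
quoted above) can simply read them back:

/*
 * Verification sketch: read back the two sysctl knobs named in the
 * cover letter and report their state. The paths are assumptions
 * taken from the cover letter text.
 */
#include <stdio.h>

static int read_knob(const char *path)
{
	FILE *f = fopen(path, "r");
	int val = -1;

	if (!f)
		return -1;	/* knob absent: kernel lacks the feature */
	if (fscanf(f, "%d", &val) != 1)
		val = -1;
	fclose(f);
	return val;
}

int main(void)
{
	int uce = read_knob("/proc/sys/kernel/uce_kernel_recovery");
	int soft = read_knob("/proc/sys/vm/enable_soft_offline");

	printf("uce_kernel_recovery = %d (want the mc-recovery bit set)\n", uce);
	printf("enable_soft_offline = %d (3 = skip HugeTLB pages)\n", soft);
	return 0;
}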
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/ID6J4B

--------------------------------

Introduce copy_mc_highpage{s} to properly handle uncorrectable memory
errors (UCE) during kernel page copies. Rather than panicking on
hardware memory errors, the implementation now safely propagates the
error condition to the caller.

Signed-off-by: Wupeng Ma <mawupeng1@huawei.com>
---
 include/linux/highmem.h | 55 +++++++++++++++++++++++++++++++++++++++++
 1 file changed, 55 insertions(+)

diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 1fed918bb1e5..0baeb9112e63 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -267,6 +267,61 @@ static inline void copy_highpage(struct page *to, struct page *from)
 	kunmap_atomic(vfrom);
 }
 
+#ifdef CONFIG_UCE_KERNEL_RECOVERY
+/* Return -EFAULT if there was a #MC during copy, otherwise 0 for success. */
+static inline int copy_mc_highpage(struct page *to, struct page *from)
+{
+	char *vfrom, *vto;
+	int ret;
+
+	vfrom = kmap_atomic(from);
+	vto = kmap_atomic(to);
+	ret = copy_page_cow(vto, vfrom);
+	kunmap_atomic(vto);
+	kunmap_atomic(vfrom);
+
+	return ret;
+}
+
+/* Return -EFAULT if there was a #MC during copy, otherwise 0 for success. */
+static inline int copy_mc_highpages(struct page *to, struct page *from,
+				    int nr_pages)
+{
+	int ret = 0;
+	int i;
+
+	for (i = 0; i < nr_pages; i++) {
+		cond_resched();
+		ret = copy_mc_highpage(to + i, from + i);
+		if (ret)
+			return -EFAULT;
+	}
+
+	return ret;
+}
+#else
+static inline int copy_mc_highpage(struct page *to, struct page *from)
+{
+	copy_highpage(to, from);
+
+	return 0;
+}
+
+/* Without CONFIG_UCE_KERNEL_RECOVERY the copy cannot report a #MC; always 0. */
+static inline int copy_mc_highpages(struct page *to, struct page *from,
+				    int nr_pages)
+{
+	int i;
+
+	for (i = 0; i < nr_pages; i++) {
+		cond_resched();
+		(void)copy_mc_highpage(to + i, from + i);
+	}
+
+	return 0;
+}
+#endif
+
 #endif
 
 #endif /* _LINUX_HIGHMEM_H */
-- 
2.43.0
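The contract implemented above -- stop at the first page whose copy consumed
a machine check and return -EFAULT to the caller, rather than completing or
crashing mid-copy -- can be pictured with a small user-space analogue (purely
illustrative; fake_copy_mc() is a made-up stand-in for the MC-safe copy, not
a kernel API):

/*
 * User-space analogue of the copy_mc_highpages() error-propagation
 * contract: abort the multi-page copy at the first injected failure
 * and hand the error back to the caller.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>

#define PAGE_SIZE 4096
#define NR_PAGES  4

static int fake_copy_mc(char *dst, const char *src, int inject_fault)
{
	if (inject_fault)
		return -EFAULT;	/* pretend a #MC was consumed here */
	memcpy(dst, src, PAGE_SIZE);
	return 0;
}

int main(void)
{
	static char src[NR_PAGES][PAGE_SIZE], dst[NR_PAGES][PAGE_SIZE];
	int i, ret = 0;

	for (i = 0; i < NR_PAGES; i++) {
		/* inject a fault on page 2 to exercise the error path */
		ret = fake_copy_mc(dst[i], src[i], i == 2);
		if (ret) {
			fprintf(stderr, "copy failed at page %d: %d\n", i, ret);
			break;	/* the caller would abort the migration */
		}
	}
	return ret ? 1 : 0;
}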
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/ID6J4B

--------------------------------

When a memory fault is encountered during page migration on arm64
systems, this change ensures the faulting user process is terminated
instead of causing a kernel panic. The implementation adds proper error
handling for the copy operations in migrate_page().

To enable this, the corresponding uce_kernel_recovery bit should be set:

  - echo 1 > /proc/sys/kernel/uce_kernel_recovery

Signed-off-by: Wupeng Ma <mawupeng1@huawei.com>
---
 mm/migrate.c | 79 ++++++++++++++++++++++++++++++++++++++++++++++------
 1 file changed, 70 insertions(+), 9 deletions(-)

diff --git a/mm/migrate.c b/mm/migrate.c
index f8c379a0b9b9..62e584841fa7 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -47,6 +47,7 @@
 #include <linux/page_owner.h>
 #include <linux/sched/mm.h>
 #include <linux/ptrace.h>
+#include <linux/highmem.h>
 
 #include <asm/tlbflush.h>
 
@@ -640,24 +641,33 @@ int migrate_huge_page_move_mapping(struct address_space *mapping,
  * arithmetic will work across the entire page.  We need something more
  * specialized.
  */
-static void __copy_gigantic_page(struct page *dst, struct page *src,
-				 int nr_pages)
+static int __copy_gigantic_page(struct page *dst, struct page *src,
+				int nr_pages, bool mc)
 {
-	int i;
+	int i, ret = 0;
 	struct page *dst_base = dst;
 	struct page *src_base = src;
 
 	for (i = 0; i < nr_pages; ) {
 		cond_resched();
-		copy_highpage(dst, src);
+
+		if (mc) {
+			ret = copy_mc_highpage(dst, src);
+			if (ret)
+				return -EFAULT;
+		} else {
+			copy_highpage(dst, src);
+		}
 
 		i++;
 		dst = mem_map_next(dst, dst_base, i);
 		src = mem_map_next(src, src_base, i);
 	}
+
+	return ret;
 }
 
-static void copy_huge_page(struct page *dst, struct page *src)
+static int __copy_huge_page(struct page *dst, struct page *src, bool mc)
 {
 	int i;
 	int nr_pages;
@@ -667,20 +677,32 @@ static void copy_huge_page(struct page *dst, struct page *src)
 		struct hstate *h = page_hstate(src);
 		nr_pages = pages_per_huge_page(h);
 
-		if (unlikely(nr_pages > MAX_ORDER_NR_PAGES)) {
-			__copy_gigantic_page(dst, src, nr_pages);
-			return;
-		}
+		if (unlikely(nr_pages > MAX_ORDER_NR_PAGES))
+			return __copy_gigantic_page(dst, src, nr_pages, mc);
 	} else {
 		/* thp page */
 		BUG_ON(!PageTransHuge(src));
 		nr_pages = hpage_nr_pages(src);
 	}
 
+	if (mc)
+		return copy_mc_highpages(dst, src, nr_pages);
+
 	for (i = 0; i < nr_pages; i++) {
 		cond_resched();
 		copy_highpage(dst + i, src + i);
 	}
+
+	return 0;
+}
+
+static int copy_huge_page(struct page *dst, struct page *src)
+{
+	return __copy_huge_page(dst, src, false);
+}
+
+static int copy_mc_huge_page(struct page *dst, struct page *src)
+{
+	return __copy_huge_page(dst, src, true);
 }
 
 /*
@@ -756,6 +778,38 @@ void migrate_page_copy(struct page *newpage, struct page *page)
 }
 EXPORT_SYMBOL(migrate_page_copy);
 
+static int migrate_page_copy_mc(struct page *newpage, struct page *page)
+{
+	int rc;
+
+	if (PageHuge(page) || PageTransHuge(page))
+		rc = copy_mc_huge_page(newpage, page);
+	else
+		rc = copy_mc_highpage(newpage, page);
+
+	return rc;
+}
+
+static int migrate_page_mc_extra(struct address_space *mapping,
+				 struct page *newpage, struct page *page,
+				 enum migrate_mode mode, int extra_count)
+{
+	int rc;
+
+	rc = migrate_page_copy_mc(newpage, page);
+	if (rc)
+		return rc;
+
+	rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode,
+				       extra_count);
+	if (rc != MIGRATEPAGE_SUCCESS)
+		return rc;
+
+	migrate_page_states(newpage, page);
+
+	return rc;
+}
+
 /************************************************************
  *                    Migration functions
  ***********************************************************/
@@ -774,6 +828,13 @@ int migrate_page(struct address_space *mapping,
 
 	BUG_ON(PageWriteback(page));	/* Writeback must be complete */
 
+#ifdef CONFIG_UCE_KERNEL_RECOVERY
+	if (IS_ENABLED(CONFIG_ARM64) &&
+	    is_cow_kernel_recovery_enable() &&
+	    (mode != MIGRATE_SYNC_NO_COPY))
+		return migrate_page_mc_extra(mapping, newpage, page, mode, 0);
+#endif
+
 	rc = migrate_page_move_mapping(mapping, newpage, page, NULL, mode, 0);
 
 	if (rc != MIGRATEPAGE_SUCCESS)
-- 
2.43.0
From: Jiaqi Yan <jiaqiyan@google.com>

mainline inclusion
from mainline-v6.11-rc1
commit 56374430c5dfcf6d4f1df79514f797b45fbd0485
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/ID6J4B
Reference: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?i...

--------------------------------

Correctable memory errors are very common on servers with large amounts
of memory, and are corrected by ECC. Soft offline is the kernel's
additional recovery handling for memory pages having (excessive)
corrected memory errors. An impacted page is migrated to a healthy page
if it is in use; the original page is then discarded from any future use.

The actual policy on whether (and when) to soft offline should be
maintained by userspace, especially in the case of a 1G HugeTLB page.
Soft-offline dissolves the HugeTLB page, whether in-use or free, into
chunks of 4K pages, reducing HugeTLB pool capacity by 1 hugepage. If
userspace has not acknowledged such behavior, it may be surprised when
it later fails to mmap hugepages due to the shortage. In the case of a
transparent hugepage, it will be split into 4K pages as well; userspace
will stop enjoying the transparent performance.

In addition, discarding an entire 1G HugeTLB page only because of
corrected memory errors sounds very costly, and the kernel had better
not do it under the hood. But today there are at least 2 cases doing so:

1. when the GHES driver sees both GHES_SEV_CORRECTED and
   CPER_SEC_ERROR_THRESHOLD_EXCEEDED after parsing the CPER.
2. when the RAS Correctable Errors Collector counts correctable errors
   per PFN and the counter for a PFN reaches its threshold.

In both cases, userspace has no control of the soft offline performed
by the kernel's memory failure recovery.

This commit gives userspace control of soft-offlining any page: the
kernel only soft offlines a raw page / transparent hugepage / HugeTLB
hugepage if userspace has agreed to. The interface to userspace is a
new sysctl at /proc/sys/vm/enable_soft_offline. By default its value is
set to 1 to preserve existing behavior in the kernel. When set to 0,
soft-offline (e.g. MADV_SOFT_OFFLINE) will fail with EOPNOTSUPP.
[jiaqiyan@google.com: v7]
Link: https://lkml.kernel.org/r/20240628205958.2845610-3-jiaqiyan@google.com
Link: https://lkml.kernel.org/r/20240626050818.2277273-3-jiaqiyan@google.com
Signed-off-by: Jiaqi Yan <jiaqiyan@google.com>
Acked-by: Miaohe Lin <linmiaohe@huawei.com>
Acked-by: David Rientjes <rientjes@google.com>
Cc: Frank van der Linden <fvdl@google.com>
Cc: Jane Chu <jane.chu@oracle.com>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Lance Yang <ioworker0@gmail.com>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Naoya Horiguchi <nao.horiguchi@gmail.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Randy Dunlap <rdunlap@infradead.org>
Cc: Shuah Khan <shuah@kernel.org>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Wupeng Ma <mawupeng1@huawei.com>
---
 include/linux/mm.h  |  1 +
 kernel/sysctl.c     |  9 +++++++++
 mm/memory-failure.c | 13 ++++++++++++-
 3 files changed, 22 insertions(+), 1 deletion(-)

diff --git a/include/linux/mm.h b/include/linux/mm.h
index 67e299374ac8..0274a82144e4 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -2873,6 +2873,7 @@ extern int get_hwpoison_page(struct page *page);
 #define put_hwpoison_page(page)	put_page(page)
 extern int sysctl_memory_failure_early_kill;
 extern int sysctl_memory_failure_recovery;
+extern int sysctl_enable_soft_offline;
 extern void shake_page(struct page *p, int access);
 extern atomic_long_t num_poisoned_pages __read_mostly;
 extern int soft_offline_page(struct page *page, int flags);
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index 0d1f07dc7b44..f35a1990456e 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1824,6 +1824,15 @@ static struct ctl_table vm_table[] = {
 		.extra1		= &zero,
 		.extra2		= &one,
 	},
+	{
+		.procname	= "enable_soft_offline",
+		.data		= &sysctl_enable_soft_offline,
+		.maxlen		= sizeof(sysctl_enable_soft_offline),
+		.mode		= 0644,
+		.proc_handler	= proc_dointvec_minmax,
+		.extra1		= &zero,
+		.extra2		= &one,
+	},
 #endif
 	{
 		.procname	= "user_reserve_kbytes",
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 28bd5d6ed1bf..ad416016d1e9 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -67,6 +67,8 @@
 int sysctl_memory_failure_early_kill __read_mostly = 0;
 
 int sysctl_memory_failure_recovery __read_mostly = 1;
 
+int sysctl_enable_soft_offline __read_mostly = 1;
+
 atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
 
 static bool page_handle_poison(struct page *page, bool hugepage_or_freepage, bool release)
@@ -1996,7 +1998,9 @@ static int soft_offline_free_page(struct page *page)
  * @page: page to offline
  * @flags: flags. Same as memory_failure().
  *
- * Returns 0 on success, otherwise negated errno.
+ * Returns 0 on success,
+ *         -EOPNOTSUPP for disabled by /proc/sys/vm/enable_soft_offline,
+ *         < 0 otherwise negated errno.
  *
  * Soft offline a page, by migration or invalidation,
  * without killing anything. This is for the case when
@@ -2027,6 +2031,13 @@ int soft_offline_page(struct page *page, int flags)
 		return -EIO;
 	}
 
+	if (!sysctl_enable_soft_offline) {
+		pr_info_once("disabled by /proc/sys/vm/enable_soft_offline\n");
+		if (flags & MF_COUNT_INCREASED)
+			put_page(page);
+		return -EOPNOTSUPP;
+	}
+
 	if (PageHWPoison(page)) {
 		pr_info("soft offline: %#lx page already poisoned\n", pfn);
 		if (flags & MF_COUNT_INCREASED)
-- 
2.43.0
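The effect of the new sysctl is easiest to observe from userspace. A minimal
sketch, not part of the patch: it requires CAP_SYS_ADMIN and defines
MADV_SOFT_OFFLINE locally for libc headers that do not expose it:

/*
 * With "echo 0 > /proc/sys/vm/enable_soft_offline", the madvise() below
 * is expected to fail with EOPNOTSUPP; with the default value 1 it
 * attempts a real soft offline of the page. Requires CAP_SYS_ADMIN.
 */
#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/mman.h>
#include <unistd.h>

#ifndef MADV_SOFT_OFFLINE
#define MADV_SOFT_OFFLINE 101	/* from <asm-generic/mman-common.h> */
#endif

int main(void)
{
	long psize = sysconf(_SC_PAGESIZE);
	char *p = mmap(NULL, psize, PROT_READ | PROT_WRITE,
		       MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (p == MAP_FAILED) {
		perror("mmap");
		return 1;
	}
	p[0] = 1;	/* make sure the page is resident */

	if (madvise(p, psize, MADV_SOFT_OFFLINE))
		printf("soft offline rejected: %s\n", strerror(errno));
	else
		printf("page was soft offlined\n");

	munmap(p, psize);
	return 0;
}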
From: Kyle Meyer <kyle.meyer@hpe.com>

maillist inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/ID6J4B
Reference: https://lore.kernel.org/all/aMiu_Uku6Y5ZbuhM@hpe.com/T/#u

--------------------------------

Some BIOSes suppress ("cloak") corrected memory errors until a threshold
is reached. Once that threshold is reached, the BIOS reports a CPER with
the "error threshold exceeded" bit set via GHES, and the corresponding
page is soft offlined.

The BIOS does not know the page type of the corresponding page. If the
page happens to be a HugeTLB page, it will be dissolved, permanently
reducing the HugeTLB page pool. This can be problematic for workloads
that depend on a fixed number of HugeTLB pages.

Currently, soft offline must be disabled entirely to prevent HugeTLB
pages from being soft offlined. This patch provides a middle ground:
soft offline can be disabled for HugeTLB pages while remaining enabled
for non-HugeTLB pages, preserving the benefits of soft offline without
the risk of the BIOS soft offlining HugeTLB pages.

Commit 56374430c5dfc ("mm/memory-failure: userspace controls
soft-offlining pages") introduced the following sysctl interface to
control soft offline:

	/proc/sys/vm/enable_soft_offline

The interface does not distinguish between page types:

	0 - Soft offline is disabled
	1 - Soft offline is enabled

Convert enable_soft_offline to a bitmask and support disabling soft
offline for HugeTLB pages:

	Bits:
	0 - Enable soft offline
	1 - Disable soft offline for HugeTLB pages

	Supported values:
	0 - Soft offline is disabled
	1 - Soft offline is enabled
	3 - Soft offline is enabled (disabled for HugeTLB pages)

Existing behavior is preserved. Update the documentation and the HugeTLB
soft offline self tests.

Tony said:

: Recap of original problem is that some BIOS keep track of error
: threshold per-rank and use this GHES mechanism to report threshold
: exceeded on the rank.
:
: Systems that stay up a long time can accumulate enough soft errors to
: trigger this threshold. But the action of taking a page offline isn't
: going to help. For a 4K page this is merely annoying. For 1G page it
: can mess things up badly.
:
: My original patch for this just skipped the GHES->offline process for
: huge pages. But I wasn't aware of the sysctl control. That provides a
: better solution.
Link: https://lkml.kernel.org/r/aMiu_Uku6Y5ZbuhM@hpe.com
Signed-off-by: Kyle Meyer <kyle.meyer@hpe.com>
Reported-by: Shawn Fan <shawn.fan@intel.com>
Suggested-by: Tony Luck <tony.luck@intel.com>
Cc: Borislav Betkov <bp@alien8.de>
Cc: David Hildenbrand <david@redhat.com>
Cc: Jane Chu <jane.chu@oracle.com>
Cc: Jan Kara <jack@suse.cz>
Cc: Jiaqi Yan <jiaqiyan@google.com>
Cc: Joel Granados <joel.granados@kernel.org>
Cc: Johannes Weiner <hannes@cmpxchg.org>
Cc: Jonathan Corbet <corbet@lwn.net>
Cc: Liam Howlett <liam.howlett@oracle.com>
Cc: Lorenzo Stoakes <lorenzo.stoakes@oracle.com>
Cc: Miaohe Lin <linmiaohe@huawei.com>
Cc: Michal Clapinski <mclapinski@google.com>
Cc: Michal Hocko <mhocko@suse.com>
Cc: Mike Rapoport <rppt@kernel.org>
Cc: Naoya Horiguchi <nao.horiguchi@gmail.com>
Cc: Oscar Salvador <osalvador@suse.de>
Cc: Russ Anderson <russ.anderson@hpe.com>
Cc: Shuah Khan <shuah@kernel.org>
Cc: Suren Baghdasaryan <surenb@google.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Yafang <laoar.shao@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
---
 .../ABI/testing/sysfs-memory-page-offline |  3 +++
 kernel/sysctl.c                           |  2 +-
 mm/memory-failure.c                       | 16 ++++++++++++++--
 3 files changed, 18 insertions(+), 3 deletions(-)

diff --git a/Documentation/ABI/testing/sysfs-memory-page-offline b/Documentation/ABI/testing/sysfs-memory-page-offline
index e14703f12fdf..93285bbadc9e 100644
--- a/Documentation/ABI/testing/sysfs-memory-page-offline
+++ b/Documentation/ABI/testing/sysfs-memory-page-offline
@@ -20,6 +20,9 @@ Description:
 		number, or a error when the offlining failed.  Reading
 		the file is not allowed.
 
+		Soft-offline can be controlled via sysctl, see:
+		Documentation/admin-guide/sysctl/vm.rst
+
 What:		/sys/devices/system/memory/hard_offline_page
 Date:		Sep 2009
 KernelVersion:	2.6.33
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index f35a1990456e..88f92eff7bf2 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -1831,7 +1831,7 @@ static struct ctl_table vm_table[] = {
 		.mode		= 0644,
 		.proc_handler	= proc_dointvec_minmax,
 		.extra1		= &zero,
-		.extra2		= &one,
+		.extra2		= &three,
 	},
 #endif
 	{
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index ad416016d1e9..7fbc9c214da9 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -63,11 +63,14 @@
 #include "internal.h"
 #include "ras/ras_event.h"
 
+#define SOFT_OFFLINE_ENABLED		BIT(0)
+#define SOFT_OFFLINE_SKIP_HUGETLB	BIT(1)
+
 int sysctl_memory_failure_early_kill __read_mostly = 0;
 
 int sysctl_memory_failure_recovery __read_mostly = 1;
 
-int sysctl_enable_soft_offline __read_mostly = 1;
+int sysctl_enable_soft_offline __read_mostly = SOFT_OFFLINE_ENABLED;
 
 atomic_long_t num_poisoned_pages __read_mostly = ATOMIC_LONG_INIT(0);
 
@@ -2031,13 +2034,22 @@ int soft_offline_page(struct page *page, int flags)
 		return -EIO;
 	}
 
-	if (!sysctl_enable_soft_offline) {
+	if (!(sysctl_enable_soft_offline & SOFT_OFFLINE_ENABLED)) {
 		pr_info_once("disabled by /proc/sys/vm/enable_soft_offline\n");
 		if (flags & MF_COUNT_INCREASED)
 			put_page(page);
 		return -EOPNOTSUPP;
 	}
 
+	if (sysctl_enable_soft_offline & SOFT_OFFLINE_SKIP_HUGETLB) {
+		if (PageHuge(page)) {
+			pr_info_once("disabled for HugeTLB pages by /proc/sys/vm/enable_soft_offline\n");
+			if (flags & MF_COUNT_INCREASED)
+				put_page(page);
+			return -EOPNOTSUPP;
+		}
+	}
+
 	if (PageHWPoison(page)) {
 		pr_info("soft offline: %#lx page already poisoned\n", pfn);
 		if (flags & MF_COUNT_INCREASED)
-- 
2.43.0
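The resulting bitmask semantics condense into a few lines of C (illustrative
only; the macro values mirror the BIT(0)/BIT(1) definitions the patch adds to
mm/memory-failure.c):

/* Illustration of the enable_soft_offline bitmask added by this patch. */
#include <stdbool.h>
#include <stdio.h>

#define SOFT_OFFLINE_ENABLED		(1 << 0)	/* BIT(0) */
#define SOFT_OFFLINE_SKIP_HUGETLB	(1 << 1)	/* BIT(1) */

static void decode(int val, bool hugetlb_page)
{
	bool allowed = (val & SOFT_OFFLINE_ENABLED) &&
		       !(hugetlb_page && (val & SOFT_OFFLINE_SKIP_HUGETLB));

	printf("enable_soft_offline=%d, %s page: soft offline %s\n",
	       val, hugetlb_page ? "HugeTLB" : "raw",
	       allowed ? "proceeds" : "returns -EOPNOTSUPP");
}

int main(void)
{
	int vals[] = { 0, 1, 3 };	/* the supported values */

	for (int i = 0; i < 3; i++) {
		decode(vals[i], false);
		decode(vals[i], true);
	}
	return 0;
}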