From: Ma Wupeng mawupeng1@huawei.com
hulk inclusion category: feature bugzilla: https://gitee.com/openeuler/kernel/issues/I8K5CO
--------------------------------
During page migration, the page is copied in kernel space. If the original page contains an uncorrectable error (UCE), the copy will lead to a kernel panic.
In order to solve this problem, use machine check safe to catch this error, which can be achieved by using copy_mc_to_kernel to replace copy_page. Signal SIGBUS will be sent to the user task if a UCE is consumed in this situation, to avoid a kernel panic.
Add a new param to copy_huge_page to support mc. If mc is set, copy_mc_highpages will be called rather than copy_highpages during the memory copy.
Since migrate_page_move_mapping() is done before the page copy, rollback is hard due to a race condition. Copy the page at the start of migrate_page_mc_extra() to solve this problem.
Signed-off-by: Ma Wupeng mawupeng1@huawei.com --- mm/migrate.c | 71 +++++++++++++++++++++++++++++++++++++++++++++------- 1 file changed, 62 insertions(+), 9 deletions(-)
diff --git a/mm/migrate.c b/mm/migrate.c index 646918708922..a9cc06948cd4 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -548,43 +548,65 @@ int migrate_huge_page_move_mapping(struct address_space *mapping, * arithmetic will work across the entire page. We need something more * specialized. */ -static void __copy_gigantic_page(struct page *dst, struct page *src, - int nr_pages) +static int __copy_gigantic_page(struct page *dst, struct page *src, + int nr_pages, bool mc) { - int i; + int i, ret = 0; struct page *dst_base = dst; struct page *src_base = src;
for (i = 0; i < nr_pages; ) { cond_resched(); - copy_highpage(dst, src); + + if (mc) { + ret = copy_mc_highpage(dst, src); + if (ret) + return -EFAULT; + } else { + copy_highpage(dst, src); + }
i++; dst = mem_map_next(dst, dst_base, i); src = mem_map_next(src, src_base, i); } + + return ret; }
-static void copy_huge_page(struct page *dst, struct page *src) +static int __copy_huge_page(struct page *dst, struct page *src, bool mc) { int nr_pages; + int ret;
if (PageHuge(src)) { /* hugetlbfs page */ struct hstate *h = page_hstate(src); nr_pages = pages_per_huge_page(h);
- if (unlikely(nr_pages > MAX_ORDER_NR_PAGES)) { - __copy_gigantic_page(dst, src, nr_pages); - return; - } + if (unlikely(nr_pages > MAX_ORDER_NR_PAGES)) + return __copy_gigantic_page(dst, src, nr_pages, mc); } else { /* thp page */ BUG_ON(!PageTransHuge(src)); nr_pages = thp_nr_pages(src); }
+ if (mc) + return copy_mc_highpages(dst, src, nr_pages); + copy_highpages(dst, src, nr_pages); + return 0; +} + +static int copy_huge_page(struct page *dst, struct page *src) +{ + return __copy_huge_page(dst, src, false); +} + +static int copy_mc_huge_page(struct page *dst, struct page *src) +{ + return __copy_huge_page(dst, src, true); }
/* @@ -674,6 +696,37 @@ void migrate_page_copy(struct page *newpage, struct page *page) } EXPORT_SYMBOL(migrate_page_copy);
+static int migrate_page_copy_mc(struct page *newpage, struct page *page) +{ + int rc; + + if (PageHuge(page) || PageTransHuge(page)) + rc = copy_mc_huge_page(newpage, page); + else + rc = copy_mc_highpage(newpage, page); + + return rc; +} + +static int migrate_page_mc_extra(struct address_space *mapping, + struct page *newpage, struct page *page, + enum migrate_mode mode, int extra_count) +{ + int rc; + + rc = migrate_page_copy_mc(newpage, page); + if (rc) + return rc; + + rc = migrate_page_move_mapping(mapping, newpage, page, extra_count); + if (rc != MIGRATEPAGE_SUCCESS) + return rc; + + migrate_page_states(newpage, page); + + return rc; +} + /************************************************************ * Migration functions ***********************************************************/