From: Tony Luck <tony.luck@intel.com>
mainline inclusion
from mainline-v5.13
commit 171936ddaf97e6f4e1264f4128bb5cf15691339c
category: bugfix
bugzilla: 175120
CVE: NA
-------------------------------------------------
Patch series "mm,hwpoison: fix sending SIGBUS for Action Required MCE", v5.
I wrote this patchset to implement what I think is the currently acceptable solution arrived at in the previous discussion [1]. I simply borrowed Tony's mutex patch and Aili's return code patch, then queued another one to find the error virtual address in a best-effort manner. I know that this is not a perfect solution, but it should work for some typical cases.
[1]: https://lore.kernel.org/linux-mm/20210331192540.2141052f@alex-virtual-machin...
This patch (of 2):
There can be races when multiple CPUs consume poison from the same page. The first CPU to enter memory_failure() atomically sets the HWPoison page flag and begins hunting for tasks that map this page. Eventually it invalidates those mappings and may send a SIGBUS to the affected tasks.
But while all that work is going on, other CPUs see a "success" return code from memory_failure() and so they believe the error has been handled and continue executing.
Fix by wrapping most of the internal parts of memory_failure() in a mutex.
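For illustration only, here is a minimal user-space sketch of that locking pattern (not kernel code; the names fake_memory_failure(), page_poisoned and cpu_thread() are invented for the example). A static mutex taken at the top of the handler makes concurrent callers for the same page take turns, so the first caller does the real handling and, by the time a later caller is told the page is already poisoned, that handling has actually completed:

/*
 * User-space sketch of the serialization this patch adds to
 * memory_failure().  All names below are illustrative.
 */
#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int page_poisoned;	/* stand-in for the HWPoison page flag */

static int fake_memory_failure(unsigned long pfn)
{
	/* local static mutex, like mf_mutex in the patch */
	static pthread_mutex_t mf_mutex = PTHREAD_MUTEX_INITIALIZER;
	int res = 0;

	pthread_mutex_lock(&mf_mutex);

	/* atomic test-and-set, analogous to TestSetPageHWPoison() */
	if (atomic_exchange(&page_poisoned, 1)) {
		/* a previous holder of the mutex already finished handling */
		printf("pfn %#lx: already poisoned, nothing to do\n", pfn);
		goto unlock_mutex;
	}

	/* ... unmap the page, signal affected tasks, etc. ... */
	printf("pfn %#lx: handled error\n", pfn);

unlock_mutex:
	pthread_mutex_unlock(&mf_mutex);
	return res;
}

static void *cpu_thread(void *arg)
{
	(void)arg;
	fake_memory_failure(0x1234);	/* every "CPU" reports the same pfn */
	return NULL;
}

int main(void)
{
	pthread_t t[4];

	for (int i = 0; i < 4; i++)
		pthread_create(&t[i], NULL, cpu_thread, NULL);
	for (int i = 0; i < 4; i++)
		pthread_join(t[i], NULL);
	return 0;
}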
[akpm@linux-foundation.org: make mf_mutex local to memory_failure()]
Link: https://lkml.kernel.org/r/20210521030156.2612074-1-nao.horiguchi@gmail.com
Link: https://lkml.kernel.org/r/20210521030156.2612074-2-nao.horiguchi@gmail.com
Signed-off-by: Tony Luck <tony.luck@intel.com>
Signed-off-by: Naoya Horiguchi <naoya.horiguchi@nec.com>
Reviewed-by: Borislav Petkov <bp@suse.de>
Reviewed-by: Oscar Salvador <osalvador@suse.de>
Cc: Aili Yao <yaoaili@kingsoft.com>
Cc: Andy Lutomirski <luto@kernel.org>
Cc: Borislav Petkov <bp@alien8.de>
Cc: David Hildenbrand <david@redhat.com>
Cc: Jue Wang <juew@google.com>
Cc: stable@vger.kernel.org
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
Signed-off-by: Linus Torvalds <torvalds@linux-foundation.org>
Conflicts:
	mm/memory-failure.c
Signed-off-by: Nanyong Sun <sunnanyong@huawei.com>
Reviewed-by: Chen Wandun <chenwandun@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 mm/memory-failure.c | 37 +++++++++++++++++++++++--------------
 1 file changed, 23 insertions(+), 14 deletions(-)
diff --git a/mm/memory-failure.c b/mm/memory-failure.c
index 145262227bb08..54e93c679cec1 100644
--- a/mm/memory-failure.c
+++ b/mm/memory-failure.c
@@ -1256,8 +1256,9 @@ int memory_failure(unsigned long pfn, int flags)
 	struct page *hpage;
 	struct page *orig_head;
 	struct dev_pagemap *pgmap;
-	int res;
+	int res = 0;
 	unsigned long page_flags;
+	static DEFINE_MUTEX(mf_mutex);
 
 	if (!sysctl_memory_failure_recovery)
 		panic("Memory failure on page %lx", pfn);
@@ -1275,12 +1276,17 @@ int memory_failure(unsigned long pfn, int flags)
 		return -ENXIO;
 	}
 
-	if (PageHuge(p))
-		return memory_failure_hugetlb(pfn, flags);
+	mutex_lock(&mf_mutex);
+
+	if (PageHuge(p)) {
+		res = memory_failure_hugetlb(pfn, flags);
+		goto unlock_mutex;
+	}
+
 	if (TestSetPageHWPoison(p)) {
 		pr_err("Memory failure: %#lx: already hardware poisoned\n",
 			pfn);
-		return 0;
+		goto unlock_mutex;
 	}
 
 	orig_head = hpage = compound_head(p);
@@ -1300,11 +1306,11 @@ int memory_failure(unsigned long pfn, int flags)
 	if (!(flags & MF_COUNT_INCREASED) && !get_hwpoison_page(p)) {
 		if (is_free_buddy_page(p)) {
 			action_result(pfn, MF_MSG_BUDDY, MF_DELAYED);
-			return 0;
 		} else {
 			action_result(pfn, MF_MSG_KERNEL_HIGH_ORDER, MF_IGNORED);
-			return -EBUSY;
+			res = -EBUSY;
 		}
+		goto unlock_mutex;
 	}
 
 	if (PageTransHuge(hpage)) {
@@ -1320,7 +1326,8 @@ int memory_failure(unsigned long pfn, int flags)
 			if (TestClearPageHWPoison(p))
 				num_poisoned_pages_dec();
 			put_hwpoison_page(p);
-			return -EBUSY;
+			res = -EBUSY;
+			goto unlock_mutex;
 		}
 		unlock_page(p);
 		VM_BUG_ON_PAGE(!page_count(p), p);
@@ -1342,7 +1349,7 @@ int memory_failure(unsigned long pfn, int flags)
 			action_result(pfn, MF_MSG_BUDDY, MF_DELAYED);
 		else
 			action_result(pfn, MF_MSG_BUDDY_2ND, MF_DELAYED);
-		return 0;
+		goto unlock_mutex;
 	}
 
 	lock_page(p);
@@ -1354,7 +1361,7 @@ int memory_failure(unsigned long pfn, int flags)
 	if (PageCompound(p) && compound_head(p) != orig_head) {
 		action_result(pfn, MF_MSG_DIFFERENT_COMPOUND, MF_IGNORED);
 		res = -EBUSY;
-		goto out;
+		goto unlock_page;
 	}
 
 	/*
@@ -1377,14 +1384,14 @@ int memory_failure(unsigned long pfn, int flags)
 		num_poisoned_pages_dec();
 		unlock_page(p);
 		put_hwpoison_page(p);
-		return 0;
+		goto unlock_mutex;
 	}
 	if (hwpoison_filter(p)) {
 		if (TestClearPageHWPoison(p))
 			num_poisoned_pages_dec();
 		unlock_page(p);
 		put_hwpoison_page(p);
-		return 0;
+		goto unlock_mutex;
 	}
 
 	/*
@@ -1411,7 +1418,7 @@ int memory_failure(unsigned long pfn, int flags)
 	if (!hwpoison_user_mappings(p, pfn, flags, &hpage)) {
 		action_result(pfn, MF_MSG_UNMAP_FAILED, MF_IGNORED);
 		res = -EBUSY;
-		goto out;
+		goto unlock_page;
 	}
 
 	/*
@@ -1420,13 +1427,15 @@ int memory_failure(unsigned long pfn, int flags)
 	if (PageLRU(p) && !PageSwapCache(p) && p->mapping == NULL) {
 		action_result(pfn, MF_MSG_TRUNCATED_LRU, MF_IGNORED);
 		res = -EBUSY;
-		goto out;
+		goto unlock_page;
 	}
 
 identify_page_state:
 	res = identify_page_state(pfn, p, page_flags);
-out:
+unlock_page:
 	unlock_page(p);
+unlock_mutex:
+	mutex_unlock(&mf_mutex);
 	return res;
 }
 EXPORT_SYMBOL_GPL(memory_failure);