
Offering: HULK
hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/IBZIRG

-------------------------------

The commit 9f779dc0a09c ("arm64: mm: HVO: support BBM of vmemmap
pgtable safely") is improper for x86 because it disables IRQs before
the TLB flush in split_vmemmap_huge_pmd(), which requires sending IPIs
on x86. The comment of smp_call_function_many_cond() points out that a
deadlock can happen when it is called with interrupts disabled.
Revert to spin_lock() for architectures other than arm64.

Fixes: 9f779dc0a09c ("arm64: mm: HVO: support BBM of vmemmap pgtable safely")
Signed-off-by: Nanyong Sun <sunnanyong@huawei.com>
---
 arch/arm64/include/asm/pgtable.h |  2 ++
 mm/hugetlb_vmemmap.c             | 12 ++++++++++--
 2 files changed, 12 insertions(+), 2 deletions(-)

diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index 626e43967e0a..2b66aab73dbc 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -1515,6 +1515,8 @@ void vmemmap_update_pmd(unsigned long addr, pmd_t *pmdp, pte_t *ptep);
 #define vmemmap_update_pmd vmemmap_update_pmd
 void vmemmap_update_pte(unsigned long addr, pte_t *ptep, pte_t pte);
 #define vmemmap_update_pte vmemmap_update_pte
+#define vmemmap_split_lock(lock) spin_lock_irq(lock)
+#define vmemmap_split_unlock(lock) spin_unlock_irq(lock)
 #endif
 
 #endif /* !__ASSEMBLY__ */
diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
index 149ab629855c..427cfd08069e 100644
--- a/mm/hugetlb_vmemmap.c
+++ b/mm/hugetlb_vmemmap.c
@@ -69,6 +69,14 @@ static inline void vmemmap_flush_tlb_range(unsigned long start,
 }
 #endif
 
+#ifndef vmemmap_split_lock
+#define vmemmap_split_lock(lock) spin_lock(lock)
+#endif
+
+#ifndef vmemmap_split_unlock
+#define vmemmap_split_unlock(lock) spin_unlock(lock)
+#endif
+
 static int split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start)
 {
 	pmd_t __pmd;
@@ -99,7 +107,7 @@ static int split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start)
 		set_pte_at(&init_mm, addr, pte, entry);
 	}
 
-	spin_lock_irq(&init_mm.page_table_lock);
+	vmemmap_split_lock(&init_mm.page_table_lock);
 	if (likely(pmd_leaf(*pmd))) {
 		/*
 		 * Higher order allocations from buddy allocator must be able to
@@ -116,7 +124,7 @@ static int split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start)
 	} else {
 		pte_free_kernel(&init_mm, pgtable);
 	}
-	spin_unlock_irq(&init_mm.page_table_lock);
+	vmemmap_split_unlock(&init_mm.page_table_lock);
 
 	return 0;
 }
-- 
2.34.1