
Offering: HULK
hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/ICTXL2

-------------------------------

Commit 94238c5ff554 ("[Huawei] arm64: mm: HVO: make spin_lock irq
safe") is unsafe on x86 because it disables IRQs before the TLB flush
in split_vmemmap_huge_pmd(), which needs to send IPIs on x86. The
comment in smp_call_function_many_cond() points out that a deadlock
can happen when it is called with interrupts disabled. Revert to
spin_lock() for architectures other than arm64.

Fixes: 94238c5ff554 ("[Huawei] arm64: mm: HVO: make spin_lock irq safe")
Signed-off-by: Nanyong Sun <sunnanyong@huawei.com>
---
 arch/arm64/include/asm/pgtable.h |  2 ++
 mm/sparse-vmemmap.c              | 12 ++++++++++--
 2 files changed, 12 insertions(+), 2 deletions(-)

diff --git a/arch/arm64/include/asm/pgtable.h b/arch/arm64/include/asm/pgtable.h
index f914c30b7487..91e2c5a1bf29 100644
--- a/arch/arm64/include/asm/pgtable.h
+++ b/arch/arm64/include/asm/pgtable.h
@@ -1043,6 +1043,8 @@ void vmemmap_update_pmd(unsigned long addr, pmd_t *pmdp, pte_t *ptep);
 #define vmemmap_update_pmd vmemmap_update_pmd
 void vmemmap_update_pte(unsigned long addr, pte_t *ptep, pte_t pte);
 #define vmemmap_update_pte vmemmap_update_pte
+#define vmemmap_split_lock(lock)	spin_lock_irq(lock)
+#define vmemmap_split_unlock(lock)	spin_unlock_irq(lock)
 #endif
 
 #endif /* !__ASSEMBLY__ */
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index a47b027af1f7..04dd9133aae7 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -70,6 +70,14 @@ static inline void vmemmap_update_pte(unsigned long addr,
 }
 #endif
 
+#ifndef vmemmap_split_lock
+#define vmemmap_split_lock(lock)	spin_lock(lock)
+#endif
+
+#ifndef vmemmap_split_unlock
+#define vmemmap_split_unlock(lock)	spin_unlock(lock)
+#endif
+
 #ifndef vmemmap_flush_tlb_all
 static inline void vmemmap_flush_tlb_all(void)
 {
@@ -107,7 +115,7 @@ static int __split_vmemmap_huge_pmd(pmd_t *pmd, unsigned long start)
 		set_pte_at(&init_mm, addr, pte, entry);
 	}
 
-	spin_lock_irq(&init_mm.page_table_lock);
+	vmemmap_split_lock(&init_mm.page_table_lock);
 	if (likely(pmd_leaf(*pmd))) {
 		/*
 		 * Higher order allocations from buddy allocator must be able to
@@ -124,7 +132,7 @@
 	} else {
 		pte_free_kernel(&init_mm, pgtable);
 	}
-	spin_unlock_irq(&init_mm.page_table_lock);
+	vmemmap_split_unlock(&init_mm.page_table_lock);
 
 	return 0;
 }
-- 
2.34.1
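
For context, the override works purely at the preprocessor level: an arch
header may define vmemmap_split_lock()/vmemmap_split_unlock() before the
generic #ifndef fallbacks are seen, so arm64 gets the irq-safe variants
while everyone else keeps plain spin_lock(). Below is a minimal userspace
sketch of the same pattern, not kernel code: ARCH_ARM64 is a hypothetical
compile-time switch standing in for the real arm64 header, and the
spinlock stubs only print which variant was selected.

#include <stdio.h>

typedef struct { int dummy; } spinlock_t;

/* Stubs standing in for the kernel locking primitives. */
static void spin_lock(spinlock_t *l)       { (void)l; puts("spin_lock"); }
static void spin_unlock(spinlock_t *l)     { (void)l; puts("spin_unlock"); }
static void spin_lock_irq(spinlock_t *l)   { (void)l; puts("spin_lock_irq"); }
static void spin_unlock_irq(spinlock_t *l) { (void)l; puts("spin_unlock_irq"); }

#ifdef ARCH_ARM64	/* hypothetical switch; the real code keys off the arm64 header */
#define vmemmap_split_lock(lock)	spin_lock_irq(lock)
#define vmemmap_split_unlock(lock)	spin_unlock_irq(lock)
#endif

/* Generic fallbacks, mirroring mm/sparse-vmemmap.c. */
#ifndef vmemmap_split_lock
#define vmemmap_split_lock(lock)	spin_lock(lock)
#endif
#ifndef vmemmap_split_unlock
#define vmemmap_split_unlock(lock)	spin_unlock(lock)
#endif

int main(void)
{
	spinlock_t lock;

	/*
	 * Building with -DARCH_ARM64 selects the irq-safe variant; a
	 * plain build leaves interrupts enabled, so a TLB flush taken
	 * under the lock can still send and receive IPIs.
	 */
	vmemmap_split_lock(&lock);
	vmemmap_split_unlock(&lock);
	return 0;
}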