From: Xu Qiang <xuqiang36@huawei.com>
Offering: HULK
hulk inclusion
category: other
bugzilla: NA
----------------------------------------------
When hugetlb pages are allocated under hugetlb_lock, an interrupt taken on the same CPU can end up freeing a huge page from softirq context and spinning on hugetlb_lock again. This deadlocks and trips the hard-lockup watchdog. The log is as follows:
Call trace:
 dump_backtrace+0x0/0x1e0
 show_stack+0x2c/0x38
 dump_stack+0xc8/0x100
 panic+0x1ec/0x408
 nmi_panic+0x8c/0x90
 watchdog_hardlockup_check+0x150/0x158
 sdei_watchdog_callback+0x64/0x88
 sdei_event_handler+0x24/0x88
 __sdei_handler+0xbc/0x140
 __sdei_asm_handler+0xbc/0x164
 queued_spin_lock_slowpath+0x74/0x330
 free_huge_page+0x2e0/0x2f0
 __put_compound_page+0x44/0x50
 __put_page+0x2c/0x68
 free_mem_pool_single_page+0xd4/0x110
 hdccom_free_mem_page+0x1e0/0x278
 free_mem_page+0xa0/0xb8
 hdcdrv_free_mem_mirror+0x50/0x88
 hdcdrv_tx_finish_notify_task+0x118/0x3a0
 tasklet_action_common.isra.2+0x150/0x168
 tasklet_action+0x2c/0x38
 __do_softirq+0x118/0x344
 irq_exit+0xb8/0xd0
 __handle_domain_irq+0x6c/0xc0
 gic_handle_irq+0x64/0x140
 el1_irq+0xac/0x140
 dequeue_huge_page_nodemask+0x134/0x300
 hugetlb_alloc_hugepage_nodemask+0x1a0/0x208
 hugetlb_alloc_hugepage+0x6c/0x90
 devmm_alloc_huge_page_node+0x50/0x90
 devmm_alloc_pages_node+0x58/0x138
 devmm_alloc_hugepage+0xac/0x280
 devmm_insert_huge_pages+0x68/0x1b8
 devmm_insert_pages+0x70/0xf0
 devmm_chan_page_create_query_h2d_process+0x1c4/0x768
 devmm_chan_msg_dispatch+0x254/0x468
 agentmm_non_trans_msg_process+0xc0/0x1d0
 agentdrv_non_trans_rx_msg_callback+0x54/0x190
 agentdrv_non_trans_rx_msg_task+0xd0/0x3b8
 process_one_work+0x1fc/0x488
 worker_thread+0x154/0x4f8
 kthread+0x130/0x138
 ret_from_fork+0x10/0x18
The fix is to disable interrupts while hugetlb_lock is held on these allocation paths, using spin_lock_irqsave()/spin_unlock_irqrestore(), so the softirq free path cannot re-enter and contend for the lock on the same CPU.
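For reference, a minimal sketch of the locking pattern being applied: a lock that is also taken from softirq context must be acquired with spin_lock_irqsave() in process context. demo_lock, demo_alloc() and demo_free() below are hypothetical stand-ins, not the real hugetlb symbols.

#include <linux/spinlock.h>

/*
 * Minimal sketch of the locking pattern applied by this patch.
 * demo_lock, demo_alloc() and demo_free() are hypothetical
 * stand-ins, not the actual hugetlb code.
 */
static DEFINE_SPINLOCK(demo_lock);

/* Process-context allocation path (cf. hugetlb_alloc_hugepage_normal()). */
static void demo_alloc(void)
{
	unsigned long flags;

	/*
	 * Plain spin_lock() is not safe here: if an interrupt arrives on
	 * this CPU while the lock is held and its softirq also takes
	 * demo_lock, the CPU spins forever and the hard-lockup watchdog
	 * fires. Disabling interrupts closes that window.
	 */
	spin_lock_irqsave(&demo_lock, flags);
	/* ... take a page off the free list ... */
	spin_unlock_irqrestore(&demo_lock, flags);
}

/* Softirq/tasklet free path (cf. free_huge_page() via __put_page()). */
static void demo_free(void)
{
	spin_lock(&demo_lock);
	/* ... return the page to the free list ... */
	spin_unlock(&demo_lock);
}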
Signed-off-by: Xu Qiang <xuqiang36@huawei.com>
---
 fs/hugetlbfs/inode.c | 5 +++--
 mm/hugetlb.c         | 5 +++--
 2 files changed, 6 insertions(+), 4 deletions(-)
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 179c860f952a..9fee0076cae4 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -128,9 +128,10 @@ static int hugetlb_checknode(struct vm_area_struct *vma, long nr)
 {
 	int nid;
 	int ret = 0;
+	unsigned long flags;
 	struct hstate *h = &default_hstate;
 
-	spin_lock(&hugetlb_lock);
+	spin_lock_irqsave(&hugetlb_lock, flags);
 
 	nid = vma->vm_flags >> CHECKNODE_BITS;
 
@@ -154,7 +155,7 @@ static int hugetlb_checknode(struct vm_area_struct *vma, long nr)
 	}
 
 err:
-	spin_unlock(&hugetlb_lock);
+	spin_unlock_irqrestore(&hugetlb_lock, flags);
 	return ret;
 }
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 8a95216e53f1..1cf0a61831e4 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -6299,12 +6299,13 @@ EXPORT_SYMBOL_GPL(hugetlb_get_hstate);
 static struct page *hugetlb_alloc_hugepage_normal(struct hstate *h,
 		gfp_t gfp_mask, int nid)
 {
+	unsigned long flags;
 	struct page *page = NULL;
 
-	spin_lock(&hugetlb_lock);
+	spin_lock_irqsave(&hugetlb_lock, flags);
 	if (h->free_huge_pages - h->resv_huge_pages > 0)
 		page = dequeue_huge_page_nodemask(h, gfp_mask, nid, NULL, NULL);
-	spin_unlock(&hugetlb_lock);
+	spin_unlock_irqrestore(&hugetlb_lock, flags);
 
 	return page;
 }