Sync patches for olk-5.10
Jian Zhang (1):
  ascend/hugetlb: Fix alloc static hugepage will fall back to other node

Xu Qiang (1):
  hugetlb: Fixed a deadlock issue when allocating hugetlb page memory.

Zhang Jian (2):
  Export function sched_setscheduler
  kernel/sdei: enable SDEI for nmi

Zhou Guanghui (1):
  mm/hugetlb: support disable clear hugepage
 arch/arm64/Kconfig       | 15 +++++++++++++++
 arch/arm64/kernel/sdei.c |  2 ++
 fs/hugetlbfs/inode.c     |  5 +++--
 kernel/sched/core.c      |  1 +
 mm/hugetlb.c             | 13 ++++++++-----
 5 files changed, 29 insertions(+), 7 deletions(-)
From: Zhang Jian <zhangjian210@huawei.com>

Offering: HULK
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I7SZC2
----------------------------------
Export the symbol sched_setscheduler() with EXPORT_SYMBOL_GPL() so that GPL kernel modules can set the scheduling policy and parameters of a task.
Signed-off-by: Zhang Jian <zhangjian210@huawei.com>
Signed-off-by: Zhang Zekun <zhangzekun11@huawei.com>
---
 kernel/sched/core.c | 1 +
 1 file changed, 1 insertion(+)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 468427a25c9d..e20242b54c80 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -6608,6 +6608,7 @@ int sched_setscheduler(struct task_struct *p, int policy,
 {
 	return _sched_setscheduler(p, policy, param, true);
 }
+EXPORT_SYMBOL_GPL(sched_setscheduler);
 
 int sched_setattr(struct task_struct *p, const struct sched_attr *attr)
 {
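For context, a minimal sketch of the kind of GPL module this export enables. The module and all demo_* names are illustrative, not part of this series:

#include <linux/module.h>
#include <linux/kthread.h>
#include <linux/sched.h>
#include <uapi/linux/sched/types.h>

static struct task_struct *demo_task;

static int demo_fn(void *data)
{
	while (!kthread_should_stop())
		schedule_timeout_interruptible(HZ);
	return 0;
}

static int __init demo_init(void)
{
	struct sched_param sp = { .sched_priority = 50 };
	int ret;

	demo_task = kthread_run(demo_fn, NULL, "sched_demo");
	if (IS_ERR(demo_task))
		return PTR_ERR(demo_task);

	/* This call links as a module only because sched_setscheduler()
	 * is now exported with EXPORT_SYMBOL_GPL(). */
	ret = sched_setscheduler(demo_task, SCHED_FIFO, &sp);
	if (ret)
		kthread_stop(demo_task);
	return ret;
}

static void __exit demo_exit(void)
{
	kthread_stop(demo_task);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");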
From: Xu Qiang <xuqiang36@huawei.com>

Offering: HULK
hulk inclusion
category: other
bugzilla: NA
----------------------------------------------
When allocating hugetlb page memory, a hard lockup is triggered: hugetlb_lock is taken with interrupts enabled in the allocation path, and a softirq that frees a huge page on the same CPU then spins forever on the lock that CPU already holds. The watchdog reports the following trace:
Call trace:
 dump_backtrace+0x0/0x1e0
 show_stack+0x2c/0x38
 dump_stack+0xc8/0x100
 panic+0x1ec/0x408
 nmi_panic+0x8c/0x90
 watchdog_hardlockup_check+0x150/0x158
 sdei_watchdog_callback+0x64/0x88
 sdei_event_handler+0x24/0x88
 __sdei_handler+0xbc/0x140
 __sdei_asm_handler+0xbc/0x164
 queued_spin_lock_slowpath+0x74/0x330
 free_huge_page+0x2e0/0x2f0
 __put_compound_page+0x44/0x50
 __put_page+0x2c/0x68
 free_mem_pool_single_page+0xd4/0x110
 hdccom_free_mem_page+0x1e0/0x278
 free_mem_page+0xa0/0xb8
 hdcdrv_free_mem_mirror+0x50/0x88
 hdcdrv_tx_finish_notify_task+0x118/0x3a0
 tasklet_action_common.isra.2+0x150/0x168
 tasklet_action+0x2c/0x38
 __do_softirq+0x118/0x344
 irq_exit+0xb8/0xd0
 __handle_domain_irq+0x6c/0xc0
 gic_handle_irq+0x64/0x140
 el1_irq+0xac/0x140
 dequeue_huge_page_nodemask+0x134/0x300
 hugetlb_alloc_hugepage_nodemask+0x1a0/0x208
 hugetlb_alloc_hugepage+0x6c/0x90
 devmm_alloc_huge_page_node+0x50/0x90
 devmm_alloc_pages_node+0x58/0x138
 devmm_alloc_hugepage+0xac/0x280
 devmm_insert_huge_pages+0x68/0x1b8
 devmm_insert_pages+0x70/0xf0
 devmm_chan_page_create_query_h2d_process+0x1c4/0x768
 devmm_chan_msg_dispatch+0x254/0x468
 agentmm_non_trans_msg_process+0xc0/0x1d0
 agentdrv_non_trans_rx_msg_callback+0x54/0x190
 agentdrv_non_trans_rx_msg_task+0xd0/0x3b8
 process_one_work+0x1fc/0x488
 worker_thread+0x154/0x4f8
 kthread+0x130/0x138
 ret_from_fork+0x10/0x18
The solution is to prevent interrupt reentry while hugetlb_lock is held: take the lock with spin_lock_irqsave()/spin_unlock_irqrestore() in the hugetlb page allocation paths. A minimal sketch of the pattern follows.
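In the sketch below, demo_* names are illustrative and stand in for the real hugetlb paths, not kernel code:

#include <linux/spinlock.h>

static DEFINE_SPINLOCK(demo_lock);	/* stands in for hugetlb_lock */

/* Process context: the allocation path. */
void demo_alloc(void)
{
	unsigned long flags;

	/* A plain spin_lock() here leaves interrupts enabled; if an IRQ
	 * arrives while the lock is held and its softirq runs demo_free()
	 * on the same CPU, the CPU spins on a lock it already holds and
	 * the hard-lockup watchdog fires.  Disabling local interrupts
	 * for the critical section closes that window. */
	spin_lock_irqsave(&demo_lock, flags);
	/* ... dequeue a huge page from the pool ... */
	spin_unlock_irqrestore(&demo_lock, flags);
}

/* Softirq context: the free path (cf. free_huge_page in the trace). */
void demo_free(void)
{
	spin_lock(&demo_lock);
	/* ... return the page to the pool ... */
	spin_unlock(&demo_lock);
}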
Signed-off-by: Xu Qiang <xuqiang36@huawei.com>
---
 fs/hugetlbfs/inode.c | 5 +++--
 mm/hugetlb.c         | 5 +++--
 2 files changed, 6 insertions(+), 4 deletions(-)
diff --git a/fs/hugetlbfs/inode.c b/fs/hugetlbfs/inode.c
index 179c860f952a..9fee0076cae4 100644
--- a/fs/hugetlbfs/inode.c
+++ b/fs/hugetlbfs/inode.c
@@ -128,9 +128,10 @@ static int hugetlb_checknode(struct vm_area_struct *vma, long nr)
 {
 	int nid;
 	int ret = 0;
+	unsigned long flags;
 	struct hstate *h = &default_hstate;
 
-	spin_lock(&hugetlb_lock);
+	spin_lock_irqsave(&hugetlb_lock, flags);
 
 	nid = vma->vm_flags >> CHECKNODE_BITS;
 
@@ -154,7 +155,7 @@ static int hugetlb_checknode(struct vm_area_struct *vma, long nr)
 	}
 
 err:
-	spin_unlock(&hugetlb_lock);
+	spin_unlock_irqrestore(&hugetlb_lock, flags);
 	return ret;
 }
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 8a95216e53f1..1cf0a61831e4 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -6299,12 +6299,13 @@ EXPORT_SYMBOL_GPL(hugetlb_get_hstate);
 static struct page *hugetlb_alloc_hugepage_normal(struct hstate *h,
 		gfp_t gfp_mask, int nid)
 {
+	unsigned long flags;
 	struct page *page = NULL;
 
-	spin_lock(&hugetlb_lock);
+	spin_lock_irqsave(&hugetlb_lock, flags);
 	if (h->free_huge_pages - h->resv_huge_pages > 0)
 		page = dequeue_huge_page_nodemask(h, gfp_mask, nid, NULL, NULL);
-	spin_unlock(&hugetlb_lock);
+	spin_unlock_irqrestore(&hugetlb_lock, flags);
 
 	return page;
 }
From: Zhou Guanghui <zhouguanghui1@huawei.com>

Offering: HULK
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I7SZC2
-------------------------------------
Add support for disabling huge page clearing. When CONFIG_ASCEND_CLEAR_HUGEPAGE_DISABLE is enabled, hugetlb_no_page() skips clear_huge_page() on newly allocated huge pages, which improves allocation performance for applications that initialize the pages themselves.
Signed-off-by: Zhou Guanghui <zhouguanghui1@huawei.com>
Signed-off-by: Zhang Zekun <zhangzekun11@huawei.com>
---
 arch/arm64/Kconfig | 7 +++++++
 mm/hugetlb.c       | 2 ++
 2 files changed, 9 insertions(+)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 30c747321b8e..5d3168f273bf 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -2119,6 +2119,13 @@ config ASCEND_SHARE_POOL
 	  This feature allows multiple processes to share virtual memory both
 	  in kernel and user level, which is only enabled for ascend platform.
 
+config ASCEND_CLEAR_HUGEPAGE_DISABLE
+	bool "Disable clear hugepage"
+	default n
+	help
+	  Disable clear hugepage when alloc hugepages to improve the hugepage
+	  application performance.
+
 endif
 
 endmenu
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 1cf0a61831e4..229b4170d1cd 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4906,7 +4906,9 @@ static vm_fault_t hugetlb_no_page(struct mm_struct *mm,
 			ret = vmf_error(PTR_ERR(page));
 			goto out;
 		}
+#ifndef CONFIG_ASCEND_CLEAR_HUGEPAGE_DISABLE
 		clear_huge_page(page, address, pages_per_huge_page(h));
+#endif
 		__SetPageUptodate(page);
 		new_page = true;
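The mm/hugetlb.c hunk above, rewritten in an equivalent IS_ENABLED() form purely to make the trade-off explicit (a sketch, not part of the patch):

	/* With CONFIG_ASCEND_CLEAR_HUGEPAGE_DISABLE=y the zeroing step is
	 * compiled out, so the first fault maps the huge page with its
	 * previous contents; consumers must initialize the whole page
	 * themselves. */
	if (!IS_ENABLED(CONFIG_ASCEND_CLEAR_HUGEPAGE_DISABLE))
		clear_huge_page(page, address, pages_per_huge_page(h));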
From: Zhang Jian <zhangjian210@huawei.com>

Offering: HULK
hulk inclusion
category: feature
bugzilla: https://gitee.com/openeuler/kernel/issues/I7SZC2
----------------------------------
Enable SDEI for NMI delivery on Ascend platforms. Under CONFIG_ASCEND_SDEI, skip the boot-configuration check in sdei_arch_get_entry_point() that would otherwise reject SDEI, so events such as the SDEI-based hard-lockup watchdog (see sdei_watchdog_callback in the trace above) can be delivered as NMIs.
Signed-off-by: Zhang Jian <zhangjian210@huawei.com>
Signed-off-by: Zhang Zekun <zhangzekun11@huawei.com>
---
 arch/arm64/Kconfig       | 8 ++++++++
 arch/arm64/kernel/sdei.c | 2 ++
 2 files changed, 10 insertions(+)
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 5d3168f273bf..1b418400aa27 100644
--- a/arch/arm64/Kconfig
+++ b/arch/arm64/Kconfig
@@ -2126,6 +2126,14 @@ config ASCEND_CLEAR_HUGEPAGE_DISABLE
 	  Disable clear hugepage when alloc hugepages to improve the hugepage
 	  application performance.
 
+config ASCEND_SDEI
+	bool "asend sdei features"
+	default n
+	depends on ARM_SDE_INTERFACE
+	help
+	  SDEI features used in ascend scenarios, should be disabled in other
+	  board.
+
 endif
 
 endmenu
diff --git a/arch/arm64/kernel/sdei.c b/arch/arm64/kernel/sdei.c
index 2132bd953a87..31ea45c02d93 100644
--- a/arch/arm64/kernel/sdei.c
+++ b/arch/arm64/kernel/sdei.c
@@ -123,6 +123,7 @@ bool _on_sdei_stack(unsigned long sp, struct stack_info *info)
 
 unsigned long sdei_arch_get_entry_point(int conduit)
 {
+#ifndef CONFIG_ASCEND_SDEI
 	/*
 	 * SDEI works between adjacent exception levels. If we booted at EL1 we
 	 * assume a hypervisor is marshalling events. If we booted at EL2 and
@@ -133,6 +134,7 @@ unsigned long sdei_arch_get_entry_point(int conduit)
 		pr_err("Not supported on this hardware/boot configuration\n");
 		return 0;
 	}
+#endif
 
 	if (IS_ENABLED(CONFIG_VMAP_STACK)) {
 		if (init_sdei_stacks())
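The net effect of the sdei.c hunks, as a sketch; the condition tested between the two hunks is unchanged by this patch and is represented here by the placeholder unsupported_boot_config():

unsigned long sdei_arch_get_entry_point(int conduit)
{
	/* unsupported_boot_config() is a stand-in for the existing
	 * exception-level check that the patch leaves untouched. */
	if (!IS_ENABLED(CONFIG_ASCEND_SDEI) && unsupported_boot_config()) {
		pr_err("Not supported on this hardware/boot configuration\n");
		return 0;
	}
	/* ... stack setup and entry-point selection continue as before ... */
}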
From: Jian Zhang <zhangjian210@huawei.com>

Offering: HULK
hulk inclusion
category: other
bugzilla: https://gitee.com/openeuler/kernel/issues/I7QLRH
----------------------------------------------
When hugetlb_alloc_hugepage() is used to allocate a static hugepage, the page can be allocated from a node other than the one the user passed, because hugetlb_alloc_hugepage_normal() ignored the caller's nodemask and passed NULL to dequeue_huge_page_nodemask(). Pass the nodemask through so the allocation does not fall back to other nodes.
Signed-off-by: Jian Zhang <zhangjian210@huawei.com>
Signed-off-by: Wang Wensheng <wangwensheng4@huawei.com>
---
 mm/hugetlb.c | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 229b4170d1cd..e144c7657ae2 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -6299,14 +6299,14 @@ const struct hstate *hugetlb_get_hstate(void)
 EXPORT_SYMBOL_GPL(hugetlb_get_hstate);
 
 static struct page *hugetlb_alloc_hugepage_normal(struct hstate *h,
-		gfp_t gfp_mask, int nid)
+		gfp_t gfp_mask, int nid, nodemask_t *nodemask)
 {
 	unsigned long flags;
 	struct page *page = NULL;
 
 	spin_lock_irqsave(&hugetlb_lock, flags);
 	if (h->free_huge_pages - h->resv_huge_pages > 0)
-		page = dequeue_huge_page_nodemask(h, gfp_mask, nid, NULL, NULL);
+		page = dequeue_huge_page_nodemask(h, gfp_mask, nid, nodemask, NULL);
 	spin_unlock_irqrestore(&hugetlb_lock, flags);
 
 	return page;
@@ -6337,7 +6337,7 @@ struct page *hugetlb_alloc_hugepage_nodemask(int nid, int flag, nodemask_t *nodemask)
 	gfp_mask &= ~__GFP_RECLAIM;
 
 	if (flag & HUGETLB_ALLOC_NORMAL)
-		page = hugetlb_alloc_hugepage_normal(h, gfp_mask, nid);
+		page = hugetlb_alloc_hugepage_normal(h, gfp_mask, nid, nodemask);
 	else if (flag & HUGETLB_ALLOC_BUDDY)
 		page = alloc_migrate_huge_page(h, gfp_mask, nid, nodemask);
 	else
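A hypothetical caller illustrating the fixed behavior; alloc_on_node() is made up, while hugetlb_alloc_hugepage_nodemask() and HUGETLB_ALLOC_NORMAL come from this tree:

#include <linux/nodemask.h>

static struct page *alloc_on_node(int nid)
{
	nodemask_t mask = nodemask_of_node(nid);

	/* With the nodemask passed through to dequeue_huge_page_nodemask(),
	 * the static pool is searched only on 'nid' rather than silently
	 * falling back to another node. */
	return hugetlb_alloc_hugepage_nodemask(nid, HUGETLB_ALLOC_NORMAL,
					       &mask);
}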
FeedBack: The patch(es) you sent to the kernel@openeuler.org mailing list have been converted to a pull request successfully!
Pull request link: https://gitee.com/openeuler/kernel/pulls/2094
Mailing list address: https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/3...