hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I8ZE0I
CVE: NA
--------------------------------
There is a softlockup during a cpu pressure test.
...
pc : _raw_spin_unlock_irqrestore+0x14/0x78
lr : fq_flush_timeout+0x94/0x118
...
Call trace:
 _raw_spin_unlock_irqrestore+0x14/0x78
 call_timer_fn+0x3c/0x1d0
 expire_timers+0xcc/0x190
 run_timer_softirq+0xfc/0x268
 __do_softirq+0x128/0x3dc
 ____do_softirq+0x18/0x30
 call_on_irq_stack+0x24/0x30
 do_softirq_own_stack+0x24/0x38
 irq_exit_rcu+0xc0/0xe8
 el1_interrupt+0x48/0xc0
 el1h_64_irq_handler+0x18/0x28
 el1h_64_irq+0x78/0x80
 __schedule+0xf28/0x12a0
 schedule+0x3c/0x108
 schedule_timeout+0xa0/0x1d0
 pktgen_thread_worker+0x1180/0x15d0
 kthread+0x120/0x130
 ret_from_fork+0x10/0x20
This is because the timer callback fq_flush_timeout may run for more than 10ms, and timers may be processed back to back in softirq context, which triggers the softlockup. Move the per-cpu fq_ring_free handling, which may take a long time, into a work item so that it runs in process context and can no longer trigger the softlockup.
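For context, the following is a minimal, hypothetical kernel-module sketch of the general pattern this patch applies: a timer callback that defers the potentially long-running part to a work item, and a teardown path that stops the timer before flushing the work. All demo_* names are invented for illustration only; this is not the dma-iommu code itself.

/*
 * Sketch of the timer-to-workqueue deferral pattern (illustrative only).
 */
#include <linux/module.h>
#include <linux/timer.h>
#include <linux/jiffies.h>
#include <linux/workqueue.h>

static struct timer_list demo_timer;
static struct work_struct demo_work;

static void demo_work_fn(struct work_struct *work)
{
	/*
	 * The long-running cleanup goes here. It runs in process context,
	 * so it can be preempted and cannot stall the timer softirq.
	 */
}

static void demo_timer_fn(struct timer_list *t)
{
	/* Keep the softirq-context part short and defer the rest. */
	schedule_work(&demo_work);
}

static int __init demo_init(void)
{
	INIT_WORK(&demo_work, demo_work_fn);
	timer_setup(&demo_timer, demo_timer_fn, 0);
	mod_timer(&demo_timer, jiffies + msecs_to_jiffies(10));
	return 0;
}

static void __exit demo_exit(void)
{
	/*
	 * Teardown order mirrors the patch: stop the timer first so no new
	 * work is scheduled, then wait for any queued work to finish.
	 */
	del_timer_sync(&demo_timer);
	flush_work(&demo_work);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");

The key point of the pattern is that the work function runs in process context, so it may run long (or even sleep) without keeping the timer softirq busy.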
Signed-off-by: Li Bin <huawei.libin@huawei.com>
Signed-off-by: Zhang Zekun <zhangzekun11@huawei.com>
---
 drivers/iommu/dma-iommu.c | 26 +++++++++++++++++++-------
 1 file changed, 19 insertions(+), 7 deletions(-)
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 037fcf826407..58a90521630e 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -85,6 +85,8 @@ struct iommu_dma_cookie {
 	/* Options for dma-iommu use */
 	struct iommu_dma_options	options;
 	struct mutex			mutex;
+
+	struct work_struct		free_iova_work;
 };
 
 static DEFINE_STATIC_KEY_FALSE(iommu_deferred_attach_enabled);
@@ -184,17 +186,11 @@ static void fq_flush_iotlb(struct iommu_dma_cookie *cookie)
 static void fq_flush_timeout(struct timer_list *t)
 {
 	struct iommu_dma_cookie *cookie = from_timer(cookie, t, fq_timer);
-	int cpu;
 
 	atomic_set(&cookie->fq_timer_on, 0);
 	fq_flush_iotlb(cookie);
 
-	if (cookie->options.qt == IOMMU_DMA_OPTS_SINGLE_QUEUE) {
-		fq_ring_free(cookie, cookie->single_fq);
-	} else {
-		for_each_possible_cpu(cpu)
-			fq_ring_free(cookie, per_cpu_ptr(cookie->percpu_fq, cpu));
-	}
+	schedule_work(&cookie->free_iova_work);
 }
 
 static void queue_iova(struct iommu_dma_cookie *cookie,
@@ -279,6 +275,7 @@ static void iommu_dma_free_fq(struct iommu_dma_cookie *cookie)
 		return;
 
 	del_timer_sync(&cookie->fq_timer);
+	flush_work(&cookie->free_iova_work);
 	if (cookie->options.qt == IOMMU_DMA_OPTS_SINGLE_QUEUE)
 		iommu_dma_free_fq_single(cookie->single_fq);
 	else
@@ -330,6 +327,20 @@ static int iommu_dma_init_fq_percpu(struct iommu_dma_cookie *cookie)
 	return 0;
 }
 
+static void free_iova_work_func(struct work_struct *work)
+{
+	struct iommu_dma_cookie *cookie = container_of(work, struct iommu_dma_cookie, free_iova_work);
+	int cpu;
+
+	if (cookie->options.qt == IOMMU_DMA_OPTS_SINGLE_QUEUE) {
+		fq_ring_free(cookie, cookie->single_fq);
+	} else {
+		for_each_possible_cpu(cpu)
+			fq_ring_free(cookie, per_cpu_ptr(cookie->percpu_fq, cpu));
+	}
+
+}
+
 /* sysfs updates are serialised by the mutex of the group owning @domain */
 int iommu_dma_init_fq(struct iommu_domain *domain)
 {
@@ -352,6 +363,7 @@ int iommu_dma_init_fq(struct iommu_domain *domain)
 		return -ENOMEM;
 	}
 
+	INIT_WORK(&cookie->free_iova_work, free_iova_work_func);
 	timer_setup(&cookie->fq_timer, fq_flush_timeout, 0);
 	atomic_set(&cookie->fq_timer_on, 0);
 	/*
FeedBack: The patch(es) you sent to the kernel@openeuler.org mailing list have been converted to a pull request successfully!
Pull request link: https://gitee.com/openeuler/kernel/pulls/5282
Mailing list address: https://mailweb.openeuler.org/hyperkitty/list/kernel@openeuler.org/message/G...