hulk inclusion
category: bugfix
bugzilla: https://gitee.com/openeuler/kernel/issues/I8ZE0I
CVE: NA
---------------------------------
There is a softlockup under fio pressure test with SMMU enabled:

watchdog: BUG: soft lockup - CPU#81 stuck for 22s! [swapper/81:0]
...
Call trace:
 fq_flush_timeout+0xc0/0x110
 call_timer_fn+0x34/0x178
 expire_timers+0xec/0x158
 run_timer_softirq+0xc0/0x1f8
 __do_softirq+0x120/0x324
 irq_exit+0x11c/0x140
 __handle_domain_irq+0x6c/0xc0
 gic_handle_irq+0x6c/0x170
 el1_irq+0xb8/0x140
 arch_cpu_idle+0x38/0x1c0
 default_idle_call+0x24/0x44
 do_idle+0x1f4/0x2d8
 cpu_startup_entry+0x2c/0x30
 secondary_start_kernel+0x17c/0x1c8
This is because the timer callback fq_flush_timeout can run for more than 10ms, and timers may be processed back to back in softirq context, which triggers the softlockup. Move the per-CPU fq_ring_free processing, which can take a long time, out of the timer callback and into a work item, so that it runs in process context and no longer triggers the softlockup.
Signed-off-by: Li Bin <huawei.libin@huawei.com>
Signed-off-by: Peng Wu <wupeng58@huawei.com>
Signed-off-by: Zhang Zekun <zhangzekun11@huawei.com>
---
 drivers/iommu/dma-iommu.c | 33 +++++++++++++++++++++++----------
 1 file changed, 23 insertions(+), 10 deletions(-)
diff --git a/drivers/iommu/dma-iommu.c b/drivers/iommu/dma-iommu.c
index 4b1a88f514c9..b144ce2b2915 100644
--- a/drivers/iommu/dma-iommu.c
+++ b/drivers/iommu/dma-iommu.c
@@ -68,6 +68,8 @@ struct iommu_dma_cookie {
 	/* Domain for flush queue callback; NULL if flush queue not in use */
 	struct iommu_domain		*fq_domain;
 	struct mutex			mutex;
+
+	struct work_struct		free_iova_work;
 };
 
 static DEFINE_STATIC_KEY_FALSE(iommu_deferred_attach_enabled);
@@ -155,20 +157,11 @@ static void fq_flush_iotlb(struct iommu_dma_cookie *cookie)
 static void fq_flush_timeout(struct timer_list *t)
 {
 	struct iommu_dma_cookie *cookie = from_timer(cookie, t, fq_timer);
-	int cpu;
 
 	atomic_set(&cookie->fq_timer_on, 0);
 	fq_flush_iotlb(cookie);
 
-	for_each_possible_cpu(cpu) {
-		unsigned long flags;
-		struct iova_fq *fq;
-
-		fq = per_cpu_ptr(cookie->fq, cpu);
-		spin_lock_irqsave(&fq->lock, flags);
-		fq_ring_free(cookie, fq);
-		spin_unlock_irqrestore(&fq->lock, flags);
-	}
+	schedule_work(&cookie->free_iova_work);
 }
 
 static void queue_iova(struct iommu_dma_cookie *cookie,
@@ -235,9 +228,28 @@ static void iommu_dma_free_fq(struct iommu_dma_cookie *cookie)
 			put_pages_list(&fq->entries[idx].freelist);
 	}
 
+	flush_work(&cookie->free_iova_work);
 	free_percpu(cookie->fq);
 }
 
+static void free_iova_work_func(struct work_struct *work)
+{
+	struct iommu_dma_cookie *cookie;
+	int cpu;
+
+	cookie = container_of(work, struct iommu_dma_cookie, free_iova_work);
+	for_each_possible_cpu(cpu) {
+		unsigned long flags;
+		struct iova_fq *fq;
+
+		fq = per_cpu_ptr(cookie->fq, cpu);
+		spin_lock_irqsave(&fq->lock, flags);
+		fq_ring_free(cookie, fq);
+		spin_unlock_irqrestore(&fq->lock, flags);
+	}
+}
+
+
 /* sysfs updates are serialised by the mutex of the group owning @domain */
 int iommu_dma_init_fq(struct iommu_domain *domain)
 {
@@ -271,6 +283,7 @@ int iommu_dma_init_fq(struct iommu_domain *domain)
 
 	cookie->fq = queue;
 
+	INIT_WORK(&cookie->free_iova_work, free_iova_work_func);
 	timer_setup(&cookie->fq_timer, fq_flush_timeout, 0);
 	atomic_set(&cookie->fq_timer_on, 0);
 	/*
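For reference, the sketch below is not part of the patch; it is a minimal, self-contained module illustrating the same pattern under simplified assumptions: a timer callback that stays short by deferring the slow per-CPU draining to a work item running in process context, so the softirq path never runs long enough to trip the softlockup watchdog. All demo_* names are illustrative and do not exist in the kernel.

/* Sketch only: defer slow per-CPU cleanup from a timer to a workqueue. */
#include <linux/module.h>
#include <linux/init.h>
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/percpu.h>
#include <linux/spinlock.h>

struct demo_fq {
	spinlock_t lock;
	/* ... queued entries to drain ... */
};

static struct demo_fq __percpu *demo_fq_pcpu;
static struct timer_list demo_timer;
static struct work_struct demo_work;

/* Runs in process context: a long walk over all possible CPUs here does
 * not monopolise softirq time the way it would inside the timer callback. */
static void demo_work_func(struct work_struct *work)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		unsigned long flags;
		struct demo_fq *fq = per_cpu_ptr(demo_fq_pcpu, cpu);

		spin_lock_irqsave(&fq->lock, flags);
		/* drain this CPU's queue here */
		spin_unlock_irqrestore(&fq->lock, flags);
	}
}

/* Timer callback runs in softirq context: keep it short, just kick the work. */
static void demo_timer_fn(struct timer_list *t)
{
	schedule_work(&demo_work);
}

static int __init demo_init(void)
{
	int cpu;

	demo_fq_pcpu = alloc_percpu(struct demo_fq);
	if (!demo_fq_pcpu)
		return -ENOMEM;
	for_each_possible_cpu(cpu)
		spin_lock_init(&per_cpu_ptr(demo_fq_pcpu, cpu)->lock);

	INIT_WORK(&demo_work, demo_work_func);
	timer_setup(&demo_timer, demo_timer_fn, 0);
	mod_timer(&demo_timer, jiffies + HZ);
	return 0;
}

static void __exit demo_exit(void)
{
	/* Same teardown order as the patch: stop the timer, then flush the
	 * work before freeing the per-CPU data it touches. */
	del_timer_sync(&demo_timer);
	flush_work(&demo_work);
	free_percpu(demo_fq_pcpu);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");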