It is redundant to use the irqsave and irqrestore spinlock variants in hardIRQ context, where interrupts are already disabled. Use plain spin_lock()/spin_unlock() in the ipr interrupt handlers instead.
Signed-off-by: Xiaofei Tan <tanxiaofei@huawei.com>
---
 drivers/scsi/ipr.c | 21 +++++++++------------
 1 file changed, 9 insertions(+), 12 deletions(-)
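A minimal sketch of the reasoning, using hypothetical example_dev/example_isr names rather than anything from ipr.c: the genirq core invokes a handler registered with request_irq() with local interrupts already disabled on the current CPU, so saving and restoring the flags only adds overhead.

#include <linux/interrupt.h>
#include <linux/spinlock.h>

/* Hypothetical device, for illustration only. */
struct example_dev {
	spinlock_t lock;
};

/*
 * hardIRQ handler registered with request_irq(): the genirq core calls
 * it with local interrupts already disabled on this CPU, so the plain
 * spin_lock()/spin_unlock() pair is sufficient and there are no flags
 * worth saving.
 */
static irqreturn_t example_isr(int irq, void *devp)
{
	struct example_dev *dev = devp;

	spin_lock(&dev->lock);
	/* ... acknowledge and process the hardware event ... */
	spin_unlock(&dev->lock);

	return IRQ_HANDLED;
}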
diff --git a/drivers/scsi/ipr.c b/drivers/scsi/ipr.c
index e451102..0309e8f 100644
--- a/drivers/scsi/ipr.c
+++ b/drivers/scsi/ipr.c
@@ -5815,7 +5815,6 @@ static irqreturn_t ipr_isr(int irq, void *devp)
 {
 	struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
 	struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
-	unsigned long hrrq_flags = 0;
 	u32 int_reg = 0;
 	int num_hrrq = 0;
 	int irq_none = 0;
@@ -5823,10 +5822,10 @@ static irqreturn_t ipr_isr(int irq, void *devp)
 	irqreturn_t rc = IRQ_NONE;
 	LIST_HEAD(doneq);
 
-	spin_lock_irqsave(hrrq->lock, hrrq_flags);
+	spin_lock(hrrq->lock);
 	/* If interrupts are disabled, ignore the interrupt */
 	if (!hrrq->allow_interrupts) {
-		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
+		spin_unlock(hrrq->lock);
 		return IRQ_NONE;
 	}
 
@@ -5862,7 +5861,7 @@ static irqreturn_t ipr_isr(int irq, void *devp)
 	if (unlikely(rc == IRQ_NONE))
 		rc = ipr_handle_other_interrupt(ioa_cfg, int_reg);
 
-	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
+	spin_unlock(hrrq->lock);
 	list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
 		list_del(&ipr_cmd->queue);
 		del_timer(&ipr_cmd->timer);
@@ -5883,16 +5882,15 @@ static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
 {
 	struct ipr_hrr_queue *hrrq = (struct ipr_hrr_queue *)devp;
 	struct ipr_ioa_cfg *ioa_cfg = hrrq->ioa_cfg;
-	unsigned long hrrq_flags = 0;
 	struct ipr_cmnd *ipr_cmd, *temp;
 	irqreturn_t rc = IRQ_NONE;
 	LIST_HEAD(doneq);
 
-	spin_lock_irqsave(hrrq->lock, hrrq_flags);
+	spin_lock(hrrq->lock);
 
 	/* If interrupts are disabled, ignore the interrupt */
 	if (!hrrq->allow_interrupts) {
-		spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
+		spin_unlock(hrrq->lock);
 		return IRQ_NONE;
 	}
 
@@ -5900,7 +5898,7 @@ static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
 		if ((be32_to_cpu(*hrrq->hrrq_curr) & IPR_HRRQ_TOGGLE_BIT) ==
 				hrrq->toggle_bit) {
 			irq_poll_sched(&hrrq->iopoll);
-			spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
+			spin_unlock(hrrq->lock);
 			return IRQ_HANDLED;
 		}
 	} else {
@@ -5911,7 +5909,7 @@ static irqreturn_t ipr_isr_mhrrq(int irq, void *devp)
 				rc = IRQ_HANDLED;
 	}
 
-	spin_unlock_irqrestore(hrrq->lock, hrrq_flags);
+	spin_unlock(hrrq->lock);
 
 	list_for_each_entry_safe(ipr_cmd, temp, &doneq, queue) {
 		list_del(&ipr_cmd->queue);
@@ -10087,16 +10085,15 @@ static int ipr_request_other_msi_irqs(struct ipr_ioa_cfg *ioa_cfg,
 static irqreturn_t ipr_test_intr(int irq, void *devp)
 {
 	struct ipr_ioa_cfg *ioa_cfg = (struct ipr_ioa_cfg *)devp;
-	unsigned long lock_flags = 0;
 	irqreturn_t rc = IRQ_HANDLED;
 
 	dev_info(&ioa_cfg->pdev->dev, "Received IRQ : %d\n", irq);
-	spin_lock_irqsave(ioa_cfg->host->host_lock, lock_flags);
+	spin_lock(ioa_cfg->host->host_lock);
 
 	ioa_cfg->msi_received = 1;
 	wake_up(&ioa_cfg->msi_wait_q);
 
-	spin_unlock_irqrestore(ioa_cfg->host->host_lock, lock_flags);
+	spin_unlock(ioa_cfg->host->host_lock);
 	return rc;
 }
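For contrast, a hedged sketch (again with hypothetical names, not ipr.c code) of a process-context path that takes the same lock: there the irqsave/irqrestore pair must stay, since the hardIRQ handler could otherwise interrupt the critical section on the same CPU and deadlock on the lock. That is why only the interrupt handlers are converted here.

#include <linux/spinlock.h>

/* Hypothetical device shared between process context and a hardIRQ handler. */
struct example_dev {
	spinlock_t lock;
	unsigned int pending;
};

/*
 * Process-context path: local interrupts may be enabled here, so the
 * irqsave variant is still required to keep the hardIRQ handler from
 * preempting this critical section on the same CPU.
 */
static void example_queue_request(struct example_dev *dev)
{
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	dev->pending++;
	spin_unlock_irqrestore(&dev->lock, flags);
}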