From: Lance Roy <ldr709@gmail.com>

mainline inclusion
from mainline-v5.0-rc1
commit d4d592a6eeda1e381f38f398e7a0474a599c11ed
category: feature
feature: convert assertions of spin_is_locked() into lockdep_assert_held()
-------------------------------------------------

lockdep_assert_held() is better suited to checking locking requirements,
since it checks that the current thread actually holds the lock, rather
than merely that some thread does. This is also a step towards possibly
removing spin_is_locked().
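
For illustration only (this sketch is not part of the patch), consider a
hypothetical helper frob_irq() with the same locking rule as the vgic
functions touched below, i.e. callers must hold irq->irq_lock:

	/* Hypothetical helper, shown only to contrast the two checks. */
	static void frob_irq(struct vgic_irq *irq)
	{
		/*
		 * Old check: spin_is_locked() returns true while *any*
		 * context holds the lock, so this can pass even when the
		 * current thread does not hold irq_lock.
		 */
		DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));

		/*
		 * New check: with CONFIG_LOCKDEP=y this warns unless the
		 * current context holds irq_lock; with lockdep disabled
		 * it compiles away to nothing.
		 */
		lockdep_assert_held(&irq->irq_lock);
	}
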
Signed-off-by: Lance Roy <ldr709@gmail.com>
Cc: Marc Zyngier <marc.zyngier@arm.com>
Cc: Eric Auger <eric.auger@redhat.com>
Cc: linux-arm-kernel@lists.infradead.org
Cc: <kvmarm@lists.cs.columbia.edu>
Signed-off-by: Paul E. McKenney <paulmck@linux.ibm.com>
Acked-by: Christoffer Dall <christoffer.dall@arm.com>
Signed-off-by: Zenghui Yu <yuzenghui@huawei.com>
Reviewed-by: Hailiang Zhang <zhang.zhanghailiang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 virt/kvm/arm/vgic/vgic.c | 12 ++++++------
 1 file changed, 6 insertions(+), 6 deletions(-)

diff --git a/virt/kvm/arm/vgic/vgic.c b/virt/kvm/arm/vgic/vgic.c
index 4040a33..d3ce29f 100644
--- a/virt/kvm/arm/vgic/vgic.c
+++ b/virt/kvm/arm/vgic/vgic.c
@@ -196,7 +196,7 @@ void vgic_irq_set_phys_active(struct vgic_irq *irq, bool active)
  */
 static struct kvm_vcpu *vgic_target_oracle(struct vgic_irq *irq)
 {
-	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));
+	lockdep_assert_held(&irq->irq_lock);
 
 	/* If the interrupt is active, it must stay on the current vcpu */
 	if (irq->active)
@@ -280,7 +280,7 @@ static void vgic_sort_ap_list(struct kvm_vcpu *vcpu)
 {
 	struct vgic_cpu *vgic_cpu = &vcpu->arch.vgic_cpu;
 
-	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));
+	lockdep_assert_held(&vgic_cpu->ap_list_lock);
 
 	list_sort(NULL, &vgic_cpu->ap_list_head, vgic_irq_cmp);
 }
@@ -318,7 +318,7 @@ bool vgic_queue_irq_unlock(struct kvm *kvm, struct vgic_irq *irq,
 {
 	struct kvm_vcpu *vcpu;
 
-	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));
+	lockdep_assert_held(&irq->irq_lock);
 
 retry:
 	vcpu = vgic_target_oracle(irq);
@@ -709,7 +709,7 @@ static inline void vgic_fold_lr_state(struct kvm_vcpu *vcpu)
 static inline void vgic_populate_lr(struct kvm_vcpu *vcpu,
 				    struct vgic_irq *irq, int lr)
 {
-	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&irq->irq_lock));
+	lockdep_assert_held(&irq->irq_lock);
 
 	if (kvm_vgic_global_state.type == VGIC_V2)
 		vgic_v2_populate_lr(vcpu, irq, lr);
@@ -743,7 +743,7 @@ static int compute_ap_list_depth(struct kvm_vcpu *vcpu,
 
 	*multi_sgi = false;
 
-	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));
+	lockdep_assert_held(&vgic_cpu->ap_list_lock);
 
 	list_for_each_entry(irq, &vgic_cpu->ap_list_head, ap_list) {
 		int w;
@@ -768,7 +768,7 @@ static void vgic_flush_lr_state(struct kvm_vcpu *vcpu)
 	bool multi_sgi;
 	u8 prio = 0xff;
 
-	DEBUG_SPINLOCK_BUG_ON(!spin_is_locked(&vgic_cpu->ap_list_lock));
+	lockdep_assert_held(&vgic_cpu->ap_list_lock);
 
 	count = compute_ap_list_depth(vcpu, &multi_sgi);
 	if (count > kvm_vgic_global_state.nr_lr || multi_sgi)
--
1.8.3