From: Zenghui Yu <yuzenghui@huawei.com>
euleros inclusion
category: feature
bugzilla: 46842
CVE: NA
--------------------------------
Add trace to print vgic cpu interface
Signed-off-by: Zenghui Yu <yuzenghui@huawei.com>
Signed-off-by: Xiangyou Xie <xiexiangyou@huawei.com>
Reviewed-by: Ying Fang <fangying1@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 virt/kvm/arm/vgic/trace.h   | 93 +++++++++++++++++++++++++++++++++++++
 virt/kvm/arm/vgic/vgic-v3.c | 19 ++++++++
 2 files changed, 112 insertions(+)
diff --git a/virt/kvm/arm/vgic/trace.h b/virt/kvm/arm/vgic/trace.h
index d9abaac6b4d5..5285048afb25 100644
--- a/virt/kvm/arm/vgic/trace.h
+++ b/virt/kvm/arm/vgic/trace.h
@@ -148,6 +148,99 @@ TRACE_EVENT(vgic_v3_fold_lr_state,
 		  __entry->vcpu_id, __entry->irq, __entry->val, __entry->lr)
 );
 
+TRACE_EVENT(vgic_v3_populate_lr_vgic_if,
+	TP_PROTO(unsigned long vcpu_id, unsigned long hcr, unsigned long vmcr,
+		 unsigned long sre, unsigned long ap0r0,
+		 unsigned long ap0r1, unsigned long ap0r2, unsigned long ap0r3,
+		 unsigned long ap1r0, unsigned long ap1r1, unsigned long ap1r2,
+		 unsigned long ap1r3),
+	TP_ARGS(vcpu_id, hcr, vmcr, sre, ap0r0, ap0r1, ap0r2, ap0r3,
+		ap1r0, ap1r1, ap1r2, ap1r3),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, vcpu_id)
+		__field(unsigned long, hcr)
+		__field(unsigned long, vmcr)
+		__field(unsigned long, sre)
+		__field(unsigned long, ap0r0)
+		__field(unsigned long, ap0r1)
+		__field(unsigned long, ap0r2)
+		__field(unsigned long, ap0r3)
+		__field(unsigned long, ap1r0)
+		__field(unsigned long, ap1r1)
+		__field(unsigned long, ap1r2)
+		__field(unsigned long, ap1r3)
+	),
+
+	TP_fast_assign(
+		__entry->vcpu_id = vcpu_id;
+		__entry->hcr = hcr;
+		__entry->vmcr = vmcr;
+		__entry->sre = sre;
+		__entry->ap0r0 = ap0r0;
+		__entry->ap0r1 = ap0r1;
+		__entry->ap0r2 = ap0r2;
+		__entry->ap0r3 = ap0r3;
+		__entry->ap1r0 = ap1r0;
+		__entry->ap1r1 = ap1r1;
+		__entry->ap1r2 = ap1r2;
+		__entry->ap1r3 = ap1r3;
+	),
+
+	TP_printk("VCPU: %ld, HCR: 0x%lx, VMCR: 0x%lx, SRE: 0x%lx, ap0r0: 0x%lx, ap0r1: 0x%lx, ap0r2: 0x%lx, ap0r3: 0x%lx, ap1r0: 0x%lx, ap1r1: 0x%lx, ap1r2: 0x%lx, ap1r3: 0x%lx,",
+		  __entry->vcpu_id, __entry->hcr, __entry->vmcr,
+		  __entry->sre, __entry->ap0r0, __entry->ap0r1,
+		  __entry->ap0r2, __entry->ap0r3, __entry->ap1r0,
+		  __entry->ap1r1, __entry->ap1r2, __entry->ap1r3)
+);
+
+TRACE_EVENT(vgic_v3_fold_lr_state_vgic_if,
+	TP_PROTO(unsigned long vcpu_id, unsigned long hcr, unsigned long vmcr,
+		 unsigned long sre, unsigned long ap0r0,
+		 unsigned long ap0r1, unsigned long ap0r2, unsigned long ap0r3,
+		 unsigned long ap1r0, unsigned long ap1r1, unsigned long ap1r2,
+		 unsigned long ap1r3),
+	TP_ARGS(vcpu_id, hcr, vmcr, sre, ap0r0, ap0r1, ap0r2, ap0r3,
+		ap1r0, ap1r1, ap1r2, ap1r3),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, vcpu_id)
+		__field(unsigned long, hcr)
+		__field(unsigned long, vmcr)
+		__field(unsigned long, sre)
+		__field(unsigned long, ap0r0)
+		__field(unsigned long, ap0r1)
+		__field(unsigned long, ap0r2)
+		__field(unsigned long, ap0r3)
+		__field(unsigned long, ap1r0)
+		__field(unsigned long, ap1r1)
+		__field(unsigned long, ap1r2)
+		__field(unsigned long, ap1r3)
+	),
+
+	TP_fast_assign(
+		__entry->vcpu_id = vcpu_id;
+		__entry->hcr = hcr;
+		__entry->vmcr = vmcr;
+		__entry->sre = sre;
+		__entry->ap0r0 = ap0r0;
+		__entry->ap0r1 = ap0r1;
+		__entry->ap0r2 = ap0r2;
+		__entry->ap0r3 = ap0r3;
+		__entry->ap1r0 = ap1r0;
+		__entry->ap1r1 = ap1r1;
+		__entry->ap1r2 = ap1r2;
+		__entry->ap1r3 = ap1r3;
+	),
+
+	TP_printk("VCPU: %ld, HCR: 0x%lx, VMCR: 0x%lx, SRE: 0x%lx, ap0r0: 0x%lx, ap0r1: 0x%lx, ap0r2: 0x%lx, ap0r3: 0x%lx, ap1r0: 0x%lx, ap1r1: 0x%lx, ap1r2: 0x%lx, ap1r3: 0x%lx,",
+		  __entry->vcpu_id, __entry->hcr, __entry->vmcr,
+		  __entry->sre, __entry->ap0r0, __entry->ap0r1,
+		  __entry->ap0r2, __entry->ap0r3, __entry->ap1r0,
+		  __entry->ap1r1, __entry->ap1r2, __entry->ap1r3)
+);
+
+
 #endif /* _TRACE_VGIC_H */
 
 #undef TRACE_INCLUDE_PATH
diff --git a/virt/kvm/arm/vgic/vgic-v3.c b/virt/kvm/arm/vgic/vgic-v3.c
index a9c6afec8480..4d1f7afe26fe 100644
--- a/virt/kvm/arm/vgic/vgic-v3.c
+++ b/virt/kvm/arm/vgic/vgic-v3.c
@@ -127,6 +127,13 @@ void vgic_v3_fold_lr_state(struct kvm_vcpu *vcpu)
 	}
 	vgic_cpu->used_lrs = 0;
+
+	trace_vgic_v3_fold_lr_state_vgic_if(vcpu->vcpu_id, cpuif->vgic_hcr,
+			cpuif->vgic_vmcr, cpuif->vgic_sre,
+			cpuif->vgic_ap0r[0], cpuif->vgic_ap0r[1],
+			cpuif->vgic_ap0r[2], cpuif->vgic_ap0r[3],
+			cpuif->vgic_ap1r[0], cpuif->vgic_ap1r[1],
+			cpuif->vgic_ap1r[2], cpuif->vgic_ap1r[3]);
 }
 /* Requires the irq to be locked already */
@@ -211,6 +218,18 @@ void vgic_v3_populate_lr(struct kvm_vcpu *vcpu, struct vgic_irq *irq, int lr)
 	vcpu->arch.vgic_cpu.vgic_v3.vgic_lr[lr] = val;
 	trace_vgic_v3_populate_lr(vcpu->vcpu_id, irq->intid, val, lr);
+	trace_vgic_v3_populate_lr_vgic_if(vcpu->vcpu_id,
+			vcpu->arch.vgic_cpu.vgic_v3.vgic_hcr,
+			vcpu->arch.vgic_cpu.vgic_v3.vgic_vmcr,
+			vcpu->arch.vgic_cpu.vgic_v3.vgic_sre,
+			vcpu->arch.vgic_cpu.vgic_v3.vgic_ap0r[0],
+			vcpu->arch.vgic_cpu.vgic_v3.vgic_ap0r[1],
+			vcpu->arch.vgic_cpu.vgic_v3.vgic_ap0r[2],
+			vcpu->arch.vgic_cpu.vgic_v3.vgic_ap0r[3],
+			vcpu->arch.vgic_cpu.vgic_v3.vgic_ap1r[0],
+			vcpu->arch.vgic_cpu.vgic_v3.vgic_ap1r[1],
+			vcpu->arch.vgic_cpu.vgic_v3.vgic_ap1r[2],
+			vcpu->arch.vgic_cpu.vgic_v3.vgic_ap1r[3]);
 }
 
 void vgic_v3_clear_lr(struct kvm_vcpu *vcpu, int lr)