From: Xiangyou Xie <xiexiangyou@huawei.com>
euleros inclusion
category: feature
bugzilla: 46842
CVE: NA
-------------------------------------------------
There is a delay when injecting a vtimer interrupt. When a vCPU exits the guest on WFx (WFI/WFE), it switches to the background timer (bgtimer), which makes the interrupt emulation path longer.
Provide a mechanism for the bgtimer to fire ahead of time, so that the interrupt can be injected in advance and the delay avoided.
Signed-off-by: Xiangyou Xie <xiexiangyou@huawei.com>
Reviewed-by: Ying Fang <fangying1@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 include/kvm/arm_arch_timer.h |  3 +++
 virt/kvm/arm/arch_timer.c    | 44 ++++++++++++++++++++++++++++++++++--
 virt/kvm/arm/arm.c           |  4 ++++
 3 files changed, 49 insertions(+), 2 deletions(-)
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h index d6e6a45d1d24..33771352dcd6 100644 --- a/include/kvm/arm_arch_timer.h +++ b/include/kvm/arm_arch_timer.h @@ -76,6 +76,9 @@ int kvm_arm_timer_has_attr(struct kvm_vcpu *vcpu, struct kvm_device_attr *attr);
bool kvm_timer_is_pending(struct kvm_vcpu *vcpu);
+void kvm_timer_schedule(struct kvm_vcpu *vcpu); +void kvm_timer_unschedule(struct kvm_vcpu *vcpu); + u64 kvm_phys_timer_read(void);
void kvm_timer_vcpu_load(struct kvm_vcpu *vcpu); diff --git a/virt/kvm/arm/arch_timer.c b/virt/kvm/arm/arch_timer.c index 0f8cfc95a056..e1dac464e48a 100644 --- a/virt/kvm/arm/arch_timer.c +++ b/virt/kvm/arm/arch_timer.c @@ -36,6 +36,9 @@ static struct timecounter *timecounter; static unsigned int host_vtimer_irq; static u32 host_vtimer_irq_flags;
+static unsigned int bgtimer_advance_cycles; +module_param(bgtimer_advance_cycles, uint, 0644); + static DEFINE_STATIC_KEY_FALSE(has_gic_active_state);
static const struct kvm_irq_level default_ptimer_irq = { @@ -135,6 +138,7 @@ static u64 kvm_timer_earliest_exp(struct kvm_vcpu *vcpu) u64 min_virt = ULLONG_MAX, min_phys = ULLONG_MAX; struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); struct arch_timer_context *ptimer = vcpu_ptimer(vcpu); + u64 min_expire;
if (kvm_timer_irq_can_fire(vtimer)) min_virt = kvm_timer_compute_delta(vtimer); @@ -146,7 +150,17 @@ static u64 kvm_timer_earliest_exp(struct kvm_vcpu *vcpu) if ((min_virt == ULLONG_MAX) && (min_phys == ULLONG_MAX)) return 0;
- return min(min_virt, min_phys); + min_expire = min(min_virt, min_phys); + + if (bgtimer_advance_cycles) { + u64 ns = cyclecounter_cyc2ns(timecounter->cc, + bgtimer_advance_cycles, + timecounter->mask, + &timecounter->frac); + min_expire = min_expire > ns ? min_expire - ns : 0; + } + + return min_expire; }
static enum hrtimer_restart kvm_bg_timer_expire(struct hrtimer *hrt) @@ -354,6 +368,7 @@ static void kvm_timer_blocking(struct kvm_vcpu *vcpu) struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu; struct arch_timer_context *vtimer = vcpu_vtimer(vcpu); struct arch_timer_context *ptimer = vcpu_ptimer(vcpu); + u64 expire;
/* * If both timers are not capable of raising interrupts (disabled or @@ -362,11 +377,30 @@ static void kvm_timer_blocking(struct kvm_vcpu *vcpu) if (!kvm_timer_irq_can_fire(vtimer) && !kvm_timer_irq_can_fire(ptimer)) return;
+ if (hrtimer_active(&timer->bg_timer)) + return; + /* * At least one guest time will expire. Schedule a background timer. * Set the earliest expiration time among the guest timers. */ - soft_timer_start(&timer->bg_timer, kvm_timer_earliest_exp(vcpu)); + expire = kvm_timer_earliest_exp(vcpu); + + if (expire && bgtimer_advance_cycles) { + if (vtimer->cnt_cval > bgtimer_advance_cycles) + vtimer->cnt_cval -= bgtimer_advance_cycles; + } + + soft_timer_start(&timer->bg_timer, expire); +} + +void kvm_timer_schedule(struct kvm_vcpu *vcpu) +{ + if (!bgtimer_advance_cycles || kvm_timer_is_pending(vcpu)) + return; + + vtimer_save_state(vcpu); + kvm_timer_blocking(vcpu); }
static void kvm_timer_unblocking(struct kvm_vcpu *vcpu) @@ -398,6 +432,12 @@ static void vtimer_restore_state(struct kvm_vcpu *vcpu) local_irq_restore(flags); }
+void kvm_timer_unschedule(struct kvm_vcpu *vcpu) +{ + vtimer_restore_state(vcpu); + kvm_timer_unblocking(vcpu); +} + static void set_cntvoff(u64 cntvoff) { u32 low = lower_32_bits(cntvoff); diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c index 8e04e7ee7d08..f2eec858458b 100644 --- a/virt/kvm/arm/arm.c +++ b/virt/kvm/arm/arm.c @@ -378,11 +378,15 @@ void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) preempt_enable();
kvm_vgic_v4_enable_doorbell(vcpu); + + kvm_timer_schedule(vcpu); }
void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) { kvm_vgic_v4_disable_doorbell(vcpu); + + kvm_timer_unschedule(vcpu); }
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)