From: Zengruan Ye <yezengruan@huawei.com>

euleros inclusion
category: feature
bugzilla: NA
CVE: NA
--------------------------------
Implement the service calls for configuring a shared structure between a vCPU and the hypervisor, through which the hypervisor can tell the vCPU whether it is running or not.
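For illustration only, the guest side of this interface might register and release its per-vCPU shared structure roughly as sketched below. The helper names and the per-CPU variable are hypothetical; the actual guest driver is not part of this patch, which implements only the hypervisor side.

#include <linux/arm-smccc.h>
#include <linux/errno.h>
#include <linux/percpu.h>

#include <asm/pvsched-abi.h>

/* Hypothetical per-CPU instance of the shared structure. */
static DEFINE_PER_CPU(struct pvsched_vcpu_state, pvsched_vcpu_region) __aligned(64);

static int pvsched_vcpu_state_init(void)
{
	struct arm_smccc_res res;
	phys_addr_t base = per_cpu_ptr_to_phys(this_cpu_ptr(&pvsched_vcpu_region));

	/* Hand the IPA of this vCPU's structure to the hypervisor. */
	arm_smccc_1_1_hvc(ARM_SMCCC_HV_PV_SCHED_IPA_INIT, base, &res);

	return (res.a0 == SMCCC_RET_SUCCESS) ? 0 : -EINVAL;
}

static void pvsched_vcpu_state_release(void)
{
	struct arm_smccc_res res;

	/* Ask the hypervisor to stop updating this vCPU's structure. */
	arm_smccc_1_1_hvc(ARM_SMCCC_HV_PV_SCHED_IPA_RELEASE, &res);
}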
Signed-off-by: Zengruan Ye <yezengruan@huawei.com>
Reviewed-by: zhanghailiang <zhang.zhanghailiang@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 arch/arm/include/asm/kvm_host.h   | 14 ++++++++++++++
 arch/arm64/include/asm/kvm_host.h | 16 ++++++++++++++++
 include/linux/kvm_types.h         |  2 ++
 virt/kvm/arm/arm.c                |  8 ++++++++
 virt/kvm/arm/hypercalls.c         | 12 ++++++++++++
 virt/kvm/arm/pvsched.c            | 32 +++++++++++++++++++++++++++++++
 6 files changed, 84 insertions(+)

diff --git a/arch/arm/include/asm/kvm_host.h b/arch/arm/include/asm/kvm_host.h
index 8f9a966a726f..ead428a9368e 100644
--- a/arch/arm/include/asm/kvm_host.h
+++ b/arch/arm/include/asm/kvm_host.h
@@ -303,6 +303,20 @@ static inline int kvm_arch_dev_ioctl_check_extension(struct kvm *kvm, long ext)
 int kvm_perf_init(void);
 int kvm_perf_teardown(void);
 
+static inline void kvm_arm_pvsched_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
+{
+}
+
+static inline bool kvm_arm_is_pvsched_enabled(struct kvm_vcpu_arch *vcpu_arch)
+{
+	return false;
+}
+
+static inline void kvm_update_pvsched_preempted(struct kvm_vcpu *vcpu,
+						u32 preempted)
+{
+}
+
 static inline int kvm_hypercall_pvsched_features(struct kvm_vcpu *vcpu)
 {
 	return SMCCC_RET_NOT_SUPPORTED;
diff --git a/arch/arm64/include/asm/kvm_host.h b/arch/arm64/include/asm/kvm_host.h
index 452ed37f598e..11420fddaa82 100644
--- a/arch/arm64/include/asm/kvm_host.h
+++ b/arch/arm64/include/asm/kvm_host.h
@@ -324,6 +324,11 @@ struct kvm_vcpu_arch {
 	/* True when deferrable sysregs are loaded on the physical CPU,
 	 * see kvm_vcpu_load_sysregs and kvm_vcpu_put_sysregs. */
 	bool sysregs_loaded_on_cpu;
+
+	/* Guest PV sched state */
+	struct {
+		gpa_t base;
+	} pvsched;
 };
 
 /* vcpu_arch flags field values: */
@@ -430,6 +435,17 @@ void handle_exit_early(struct kvm_vcpu *vcpu, struct kvm_run *run,
 int kvm_perf_init(void);
 int kvm_perf_teardown(void);
 
+static inline void kvm_arm_pvsched_vcpu_init(struct kvm_vcpu_arch *vcpu_arch)
+{
+	vcpu_arch->pvsched.base = GPA_INVALID;
+}
+
+static inline bool kvm_arm_is_pvsched_enabled(struct kvm_vcpu_arch *vcpu_arch)
+{
+	return (vcpu_arch->pvsched.base != GPA_INVALID);
+}
+
+void kvm_update_pvsched_preempted(struct kvm_vcpu *vcpu, u32 preempted);
 int kvm_hypercall_pvsched_features(struct kvm_vcpu *vcpu);
 
 void kvm_set_sei_esr(struct kvm_vcpu *vcpu, u64 syndrome);
diff --git a/include/linux/kvm_types.h b/include/linux/kvm_types.h
index a38729c8296f..458ec5b0ed06 100644
--- a/include/linux/kvm_types.h
+++ b/include/linux/kvm_types.h
@@ -49,6 +49,8 @@ typedef unsigned long gva_t;
 typedef u64 gpa_t;
 typedef u64 gfn_t;
 
+#define GPA_INVALID	(~(gpa_t)0)
+
 typedef unsigned long hva_t;
 typedef u64 hpa_t;
 typedef u64 hfn_t;
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 2fdb8ac0f921..42dd9a2875d0 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -373,6 +373,8 @@ int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 
 	kvm_arm_reset_debug_ptr(vcpu);
 
+	kvm_arm_pvsched_vcpu_init(&vcpu->arch);
+
 	return kvm_vgic_vcpu_init(vcpu);
 }
 
@@ -423,6 +425,9 @@ void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
 		vcpu_set_wfe_traps(vcpu);
 
 	update_steal_time(vcpu);
+
+	if (kvm_arm_is_pvsched_enabled(&vcpu->arch))
+		kvm_update_pvsched_preempted(vcpu, 0);
 }
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
@@ -436,6 +441,9 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 	vcpu->cpu = -1;
 
 	kvm_arm_set_running_vcpu(NULL);
+
+	if (kvm_arm_is_pvsched_enabled(&vcpu->arch))
+		kvm_update_pvsched_preempted(vcpu, 1);
 }
 
 static void vcpu_power_off(struct kvm_vcpu *vcpu)
diff --git a/virt/kvm/arm/hypercalls.c b/virt/kvm/arm/hypercalls.c
index 780240bdeb31..f708b54d1450 100644
--- a/virt/kvm/arm/hypercalls.c
+++ b/virt/kvm/arm/hypercalls.c
@@ -14,6 +14,7 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
 	u32 func_id = smccc_get_function(vcpu);
 	u32 val = SMCCC_RET_NOT_SUPPORTED;
 	u32 feature;
+	gpa_t gpa;
 
 	switch (func_id) {
 	case ARM_SMCCC_VERSION_FUNC_ID:
@@ -48,6 +49,17 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
 	case ARM_SMCCC_HV_PV_SCHED_FEATURES:
 		val = kvm_hypercall_pvsched_features(vcpu);
 		break;
+	case ARM_SMCCC_HV_PV_SCHED_IPA_INIT:
+		gpa = smccc_get_arg1(vcpu);
+		if (gpa != GPA_INVALID) {
+			vcpu->arch.pvsched.base = gpa;
+			val = SMCCC_RET_SUCCESS;
+		}
+		break;
+	case ARM_SMCCC_HV_PV_SCHED_IPA_RELEASE:
+		vcpu->arch.pvsched.base = GPA_INVALID;
+		val = SMCCC_RET_SUCCESS;
+		break;
 	default:
 		return kvm_psci_call(vcpu);
 	}
diff --git a/virt/kvm/arm/pvsched.c b/virt/kvm/arm/pvsched.c
index 40b56e01fc5d..8a1302a52464 100644
--- a/virt/kvm/arm/pvsched.c
+++ b/virt/kvm/arm/pvsched.c
@@ -5,9 +5,39 @@
  */
 
 #include <linux/arm-smccc.h>
+#include <linux/kvm_host.h>
+
+#include <asm/pvsched-abi.h>
 
 #include <kvm/arm_hypercalls.h>
 
+void kvm_update_pvsched_preempted(struct kvm_vcpu *vcpu, u32 preempted)
+{
+	__le32 preempted_le;
+	u64 offset;
+	int idx;
+	u64 base = vcpu->arch.pvsched.base;
+	struct kvm *kvm = vcpu->kvm;
+
+	if (base == GPA_INVALID)
+		return;
+
+	preempted_le = cpu_to_le32(preempted);
+
+	/*
+	 * This function is called from atomic context, so we need to
+	 * disable page faults.
+	 */
+	pagefault_disable();
+
+	idx = srcu_read_lock(&kvm->srcu);
+	offset = offsetof(struct pvsched_vcpu_state, preempted);
+	kvm_put_guest(kvm, base + offset, preempted_le, u32);
+	srcu_read_unlock(&kvm->srcu, idx);
+
+	pagefault_enable();
+}
+
 int kvm_hypercall_pvsched_features(struct kvm_vcpu *vcpu)
 {
 	u32 feature = smccc_get_arg1(vcpu);
@@ -15,6 +45,8 @@ int kvm_hypercall_pvsched_features(struct kvm_vcpu *vcpu)
 
 	switch (feature) {
 	case ARM_SMCCC_HV_PV_SCHED_FEATURES:
+	case ARM_SMCCC_HV_PV_SCHED_IPA_INIT:
+	case ARM_SMCCC_HV_PV_SCHED_IPA_RELEASE:
 		val = SMCCC_RET_SUCCESS;
 		break;
 	}
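
Note for reviewers: the offsetof() above relies on the shared-structure layout from asm/pvsched-abi.h, introduced earlier in this series; roughly:

struct pvsched_vcpu_state {
	__le32 preempted;
	/* Structure must be 64 byte aligned, pad to that size */
	u8 padding[60];
} __packed;

With that layout, a guest could back its vcpu_is_preempted() hook by reading the field the hypervisor writes in kvm_update_pvsched_preempted(). A sketch, reusing the hypothetical pvsched_vcpu_region from the commit message above:

static bool pvsched_vcpu_is_preempted(int cpu)
{
	struct pvsched_vcpu_state *st = &per_cpu(pvsched_vcpu_region, cpu);

	/* Non-zero means the hypervisor marked this vCPU preempted. */
	return !!le32_to_cpu(READ_ONCE(st->preempted));
}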