From: Atish Patra <atish.patra@wdc.com>
euleros inclusion
category: feature
feature: initial KVM RISC-V support
bugzilla: 46845
CVE: NA
Add a KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctl interface for the floating-point registers F0-F31 and FCSR. This support is added for both the 'F' and 'D' extensions.
Reference: https://gitee.com/openeuler/kernel/issues/I26X9V

Signed-off-by: Atish Patra <atish.patra@wdc.com>
Signed-off-by: Anup Patel <anup.patel@wdc.com>
Signed-off-by: Mingwang Li <limingwang@huawei.com>
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Paolo Bonzini <pbonzini@redhat.com>
Reviewed-by: Alexander Graf <graf@amazon.com>
---
 arch/riscv/include/uapi/asm/kvm.h |  10 +++
 arch/riscv/kvm/vcpu.c             | 104 ++++++++++++++++++++++++++++++
 2 files changed, 114 insertions(+)
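Not part of the patch itself: a minimal userspace sketch of how the new register IDs could be used once this lands, assuming an already-created VCPU fd (vcpu_fd), the KVM_REG_RISCV and KVM_REG_SIZE_U32 definitions from <linux/kvm.h>, and a guest ISA with the 'F' extension enabled. The helper names set_guest_fp_f() and get_guest_fcsr() are illustrative only; the register-id layout (arch | size | type | register number) and struct kvm_one_reg come from the KVM one-reg UAPI.

#include <stdint.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>
#include <asm/kvm.h>

/* Write single-precision register f[idx] (0..31) of the guest VCPU. */
static int set_guest_fp_f(int vcpu_fd, int idx, uint32_t value)
{
	struct kvm_one_reg reg = {
		/* arch | 32-bit size | type 5 (F extension) | register number */
		.id   = KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F |
			(KVM_REG_RISCV_FP_F_REG(f[0]) + idx),
		.addr = (uint64_t)(unsigned long)&value,
	};

	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}

/* Read FCSR back through the same F-extension mapping. */
static int get_guest_fcsr(int vcpu_fd, uint32_t *out)
{
	struct kvm_one_reg reg = {
		.id   = KVM_REG_RISCV | KVM_REG_SIZE_U32 | KVM_REG_RISCV_FP_F |
			KVM_REG_RISCV_FP_F_REG(fcsr),
		.addr = (uint64_t)(unsigned long)out,
	};

	return ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
}

Both ioctls fail with EINVAL when the guest ISA lacks the requested extension or when the register size in the id does not match, mirroring the checks in kvm_riscv_vcpu_get_reg_fp()/kvm_riscv_vcpu_set_reg_fp() below.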
diff --git a/arch/riscv/include/uapi/asm/kvm.h b/arch/riscv/include/uapi/asm/kvm.h
index 08691dd27..f808ad1ce 100644
--- a/arch/riscv/include/uapi/asm/kvm.h
+++ b/arch/riscv/include/uapi/asm/kvm.h
@@ -113,6 +113,16 @@ struct kvm_riscv_timer {
 #define KVM_REG_RISCV_TIMER_REG(name)	\
 	(offsetof(struct kvm_riscv_timer, name) / sizeof(__u64))
 
+/* F extension registers are mapped as type 5 */
+#define KVM_REG_RISCV_FP_F		(0x05 << KVM_REG_RISCV_TYPE_SHIFT)
+#define KVM_REG_RISCV_FP_F_REG(name)	\
+	(offsetof(struct __riscv_f_ext_state, name) / sizeof(__u32))
+
+/* D extension registers are mapped as type 6 */
+#define KVM_REG_RISCV_FP_D		(0x06 << KVM_REG_RISCV_TYPE_SHIFT)
+#define KVM_REG_RISCV_FP_D_REG(name)	\
+	(offsetof(struct __riscv_d_ext_state, name) / sizeof(__u64))
+
 #endif
 
 #endif /* __LINUX_KVM_RISCV_H */
diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
index d039f17f1..2d8bab65d 100644
--- a/arch/riscv/kvm/vcpu.c
+++ b/arch/riscv/kvm/vcpu.c
@@ -431,6 +431,98 @@ static int kvm_riscv_vcpu_set_reg_csr(struct kvm_vcpu *vcpu,
 	return 0;
 }
 
+static int kvm_riscv_vcpu_get_reg_fp(struct kvm_vcpu *vcpu,
+				     const struct kvm_one_reg *reg,
+				     unsigned long rtype)
+{
+	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
+	unsigned long isa = vcpu->arch.isa;
+	unsigned long __user *uaddr =
+			(unsigned long __user *)(unsigned long)reg->addr;
+	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
+					    KVM_REG_SIZE_MASK |
+					    rtype);
+	void *reg_val;
+
+	if ((rtype == KVM_REG_RISCV_FP_F) &&
+	    riscv_isa_extension_available(&isa, f)) {
+		if (KVM_REG_SIZE(reg->id) != sizeof(u32))
+			return -EINVAL;
+		if (reg_num == KVM_REG_RISCV_FP_F_REG(fcsr))
+			reg_val = &cntx->fp.f.fcsr;
+		else if ((KVM_REG_RISCV_FP_F_REG(f[0]) <= reg_num) &&
+			  reg_num <= KVM_REG_RISCV_FP_F_REG(f[31]))
+			reg_val = &cntx->fp.f.f[reg_num];
+		else
+			return -EINVAL;
+	} else if ((rtype == KVM_REG_RISCV_FP_D) &&
+		   riscv_isa_extension_available(&isa, d)) {
+		if (reg_num == KVM_REG_RISCV_FP_D_REG(fcsr)) {
+			if (KVM_REG_SIZE(reg->id) != sizeof(u32))
+				return -EINVAL;
+			reg_val = &cntx->fp.d.fcsr;
+		} else if ((KVM_REG_RISCV_FP_D_REG(f[0]) <= reg_num) &&
+			   reg_num <= KVM_REG_RISCV_FP_D_REG(f[31])) {
+			if (KVM_REG_SIZE(reg->id) != sizeof(u64))
+				return -EINVAL;
+			reg_val = &cntx->fp.d.f[reg_num];
+		} else
+			return -EINVAL;
+	} else
+		return -EINVAL;
+
+	if (copy_to_user(uaddr, reg_val, KVM_REG_SIZE(reg->id)))
+		return -EFAULT;
+
+	return 0;
+}
+
+static int kvm_riscv_vcpu_set_reg_fp(struct kvm_vcpu *vcpu,
+				     const struct kvm_one_reg *reg,
+				     unsigned long rtype)
+{
+	struct kvm_cpu_context *cntx = &vcpu->arch.guest_context;
+	unsigned long isa = vcpu->arch.isa;
+	unsigned long __user *uaddr =
+			(unsigned long __user *)(unsigned long)reg->addr;
+	unsigned long reg_num = reg->id & ~(KVM_REG_ARCH_MASK |
+					    KVM_REG_SIZE_MASK |
+					    rtype);
+	void *reg_val;
+
+	if ((rtype == KVM_REG_RISCV_FP_F) &&
+	    riscv_isa_extension_available(&isa, f)) {
+		if (KVM_REG_SIZE(reg->id) != sizeof(u32))
+			return -EINVAL;
+		if (reg_num == KVM_REG_RISCV_FP_F_REG(fcsr))
+			reg_val = &cntx->fp.f.fcsr;
+		else if ((KVM_REG_RISCV_FP_F_REG(f[0]) <= reg_num) &&
+			  reg_num <= KVM_REG_RISCV_FP_F_REG(f[31]))
+			reg_val = &cntx->fp.f.f[reg_num];
+		else
+			return -EINVAL;
+	} else if ((rtype == KVM_REG_RISCV_FP_D) &&
+		   riscv_isa_extension_available(&isa, d)) {
+		if (reg_num == KVM_REG_RISCV_FP_D_REG(fcsr)) {
+			if (KVM_REG_SIZE(reg->id) != sizeof(u32))
+				return -EINVAL;
+			reg_val = &cntx->fp.d.fcsr;
+		} else if ((KVM_REG_RISCV_FP_D_REG(f[0]) <= reg_num) &&
+			   reg_num <= KVM_REG_RISCV_FP_D_REG(f[31])) {
+			if (KVM_REG_SIZE(reg->id) != sizeof(u64))
+				return -EINVAL;
+			reg_val = &cntx->fp.d.f[reg_num];
+		} else
+			return -EINVAL;
+	} else
+		return -EINVAL;
+
+	if (copy_from_user(reg_val, uaddr, KVM_REG_SIZE(reg->id)))
+		return -EFAULT;
+
+	return 0;
+}
+
 static int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
 				  const struct kvm_one_reg *reg)
 {
@@ -442,6 +534,12 @@ static int kvm_riscv_vcpu_set_reg(struct kvm_vcpu *vcpu,
 		return kvm_riscv_vcpu_set_reg_csr(vcpu, reg);
 	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_TIMER)
 		return kvm_riscv_vcpu_set_reg_timer(vcpu, reg);
+	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_F)
+		return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
+						 KVM_REG_RISCV_FP_F);
+	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_D)
+		return kvm_riscv_vcpu_set_reg_fp(vcpu, reg,
+						 KVM_REG_RISCV_FP_D);
 
 	return -EINVAL;
 }
@@ -457,6 +555,12 @@ static int kvm_riscv_vcpu_get_reg(struct kvm_vcpu *vcpu,
 		return kvm_riscv_vcpu_get_reg_csr(vcpu, reg);
 	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_TIMER)
 		return kvm_riscv_vcpu_get_reg_timer(vcpu, reg);
+	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_F)
+		return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
+						 KVM_REG_RISCV_FP_F);
+	else if ((reg->id & KVM_REG_RISCV_TYPE_MASK) == KVM_REG_RISCV_FP_D)
+		return kvm_riscv_vcpu_get_reg_fp(vcpu, reg,
+						 KVM_REG_RISCV_FP_D);
 
 	return -EINVAL;
 }