From: limingwang <limingwang@huawei.com>
euleros inclusion
category: feature
bugzilla: NA
CVE: NA
The riscv_kvm_v13 series was developed on Linux 5.8-rc4, while this tree
is based on the v5.5.19 tag, so several KVM interfaces have to be
adapted to the older API.
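Roughly, the affected interfaces map as follows between the two versions
(an illustrative summary of this patch, not an exhaustive list; argument
names abbreviated):

	v5.8-rc4				v5.5.19
	--------				-------
	kvm_arch_check_processor_compat(opaque)	kvm_arch_check_processor_compat(void)
	kvm_arch_hardware_setup(opaque)		kvm_arch_hardware_setup(void)
	mmap_read_lock(mm)			down_read(&mm->mmap_sem)
	mmap_read_unlock(mm)			up_read(&mm->mmap_sem)
	kvm_arch_vcpu_create(vcpu)		kvm_arch_vcpu_create(kvm, id)
	kvm_arch_vcpu_get_wait(vcpu)		kvm_arch_vcpu_wq(vcpu)
	rcuwait_wait_event(wait, cond, state)	swait_event_interruptible_exclusive(*wq, cond)
	kvm_arch_vcpu_ioctl_run(vcpu)		kvm_arch_vcpu_ioctl_run(vcpu, run)

On v5.5, kvm_arch_vcpu_create() must also allocate and initialize the
vcpu itself (kmem_cache_zalloc() plus kvm_vcpu_init()),
kvm_arch_vcpu_setup() and kvm_arch_vcpu_init() are separate callbacks,
and the VCPU_STAT() helper is not provided by common code, so it is
defined locally.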
Link: https://gitee.com/openeuler/kernel/issues/I1RR1Y
Signed-off-by: Mingwang Li <limingwang@huawei.com>
Reviewed-by: Yifei Jiang <jiangyifei@huawei.com>
Signed-off-by: Xie XiuQi <xiexiuqi@huawei.com>
---
 arch/riscv/kvm/main.c |  4 ++--
 arch/riscv/kvm/mmu.c  | 15 ++++++-------
 arch/riscv/kvm/vcpu.c | 49 ++++++++++++++++++++++++++++++++-----------
 3 files changed, 47 insertions(+), 21 deletions(-)
diff --git a/arch/riscv/kvm/main.c b/arch/riscv/kvm/main.c
index 6f213bcec0e8..9460e291e906 100644
--- a/arch/riscv/kvm/main.c
+++ b/arch/riscv/kvm/main.c
@@ -20,12 +20,12 @@ long kvm_arch_dev_ioctl(struct file *filp,
 	return -EINVAL;
 }
-int kvm_arch_check_processor_compat(void *opaque)
+int kvm_arch_check_processor_compat(void)
 {
 	return 0;
 }
-int kvm_arch_hardware_setup(void *opaque)
+int kvm_arch_hardware_setup(void)
 {
 	return 0;
 }
diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c
index 88bce80ee983..81d15ca5264a 100644
--- a/arch/riscv/kvm/mmu.c
+++ b/arch/riscv/kvm/mmu.c
@@ -433,7 +433,8 @@ void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
 {
 }
-void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free)
+void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
+			   struct kvm_memory_slot *dont)
 {
 }
@@ -459,7 +460,7 @@ void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
 void kvm_arch_commit_memory_region(struct kvm *kvm,
 				const struct kvm_userspace_memory_region *mem,
-				struct kvm_memory_slot *old,
+				const struct kvm_memory_slot *old,
 				const struct kvm_memory_slot *new,
 				enum kvm_mr_change change)
 {
@@ -494,7 +495,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 			(stage2_gpa_size >> PAGE_SHIFT))
 		return -EFAULT;
-	mmap_read_lock(current->mm);
+	down_read(&current->mm->mmap_sem);
 	/*
 	 * A memory region could potentially cover multiple VMAs, and
@@ -560,7 +561,7 @@ int kvm_arch_prepare_memory_region(struct kvm *kvm,
 	spin_unlock(&kvm->mmu_lock);
 out:
-	mmap_read_unlock(current->mm);
+	up_read(&current->mm->mmap_sem);
 	return ret;
 }
@@ -669,12 +670,12 @@ int kvm_riscv_stage2_map(struct kvm_vcpu *vcpu,
 		!(memslot->flags & KVM_MEM_READONLY)) ? true : false;
 	unsigned long vma_pagesize, mmu_seq;
-	mmap_read_lock(current->mm);
+	down_read(&current->mm->mmap_sem);
 	vma = find_vma_intersection(current->mm, hva, hva + 1);
 	if (unlikely(!vma)) {
 		kvm_err("Failed to find VMA for hva 0x%lx\n", hva);
-		mmap_read_unlock(current->mm);
+		up_read(&current->mm->mmap_sem);
 		return -EFAULT;
 	}
@@ -689,7 +690,7 @@ int kvm_riscv_stage2_map(struct kvm_vcpu *vcpu,
 	if (vma_pagesize == PMD_SIZE || vma_pagesize == PGDIR_SIZE)
 		gfn = (gpa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;
-	mmap_read_unlock(current->mm);
+	up_read(&current->mm->mmap_sem);
 	if (vma_pagesize != PGDIR_SIZE &&
 	    vma_pagesize != PMD_SIZE &&
diff --git a/arch/riscv/kvm/vcpu.c b/arch/riscv/kvm/vcpu.c
index adb0815951aa..d10905c18cb3 100644
--- a/arch/riscv/kvm/vcpu.c
+++ b/arch/riscv/kvm/vcpu.c
@@ -20,6 +20,9 @@
 #include <asm/csr.h>
 #include <asm/hwcap.h>
+#define VCPU_STAT(n, x, ...) \
+	{ n, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU, ## __VA_ARGS__ }
+
 struct kvm_stats_debugfs_item debugfs_entries[] = {
 	VCPU_STAT("halt_successful_poll", halt_successful_poll),
 	VCPU_STAT("halt_attempted_poll", halt_attempted_poll),
@@ -148,7 +151,35 @@ int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
 	return 0;
 }
-int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
+struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
+{
+	int err;
+	struct kvm_vcpu *vcpu;
+
+	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
+	if (!vcpu) {
+		err = -ENOMEM;
+		goto out;
+	}
+
+	err = kvm_vcpu_init(vcpu, kvm, id);
+	if (err)
+		goto free_vcpu;
+
+	return vcpu;
+
+free_vcpu:
+	kmem_cache_free(kvm_vcpu_cache, vcpu);
+out:
+	return ERR_PTR(err);
+}
+
+int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
+{
+	return 0;
+}
+
+int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
 {
 	struct kvm_cpu_context *cntx;
@@ -175,11 +206,6 @@ int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
 	return 0;
 }
-int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
-{
-	return 0;
-}
-
 void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
 {
 }
@@ -827,13 +853,13 @@ void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 static void kvm_riscv_check_vcpu_requests(struct kvm_vcpu *vcpu)
 {
-	struct rcuwait *wait = kvm_arch_vcpu_get_wait(vcpu);
+	struct swait_queue_head *wq = kvm_arch_vcpu_wq(vcpu);
 	if (kvm_request_pending(vcpu)) {
 		if (kvm_check_request(KVM_REQ_SLEEP, vcpu)) {
-			rcuwait_wait_event(wait,
-				(!vcpu->arch.power_off) && (!vcpu->arch.pause),
-				TASK_INTERRUPTIBLE);
+			swait_event_interruptible_exclusive(*wq,
+					((!vcpu->arch.power_off) &&
+					 (!vcpu->arch.pause)));
 			if (vcpu->arch.power_off || vcpu->arch.pause) {
 				/*
@@ -862,11 +888,10 @@ static void kvm_riscv_update_hvip(struct kvm_vcpu *vcpu)
 	csr_write(CSR_HVIP, csr->hvip);
 }
-int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu)
+int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
 {
 	int ret;
 	struct kvm_cpu_trap trap;
-	struct kvm_run *run = vcpu->run;
 	/* Mark this VCPU ran at least once */
 	vcpu->arch.ran_atleast_once = true;