From: Paolo Bonzini <pbonzini@redhat.com>

mainline inclusion
from mainline-5.0
commit 8fe65a8299f9e1f40cb95308ab7b3c4ad80bf801
category: bugfix
bugzilla: NA
CVE: NA
-------------------------------------------------
When manual dirty log reprotect will be enabled, kvm_get_dirty_log_protect's pointer argument will always be false on exit, because no TLB flush is needed until the manual re-protection operation. Rename it from "is_dirty" to "flush", which more accurately tells the caller what they have to do with it.
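For reference, the resulting caller pattern is sketched below. This is a simplified sketch modelled on the virt/kvm/arm/arm.c path in the diff that follows, not verbatim source: kvm_get_dirty_log_protect() sets *flush when any page in the slot was dirty, and the caller then decides whether to flush remote TLBs.

	int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
	{
		bool flush = false;	/* set to true if any page was dirtied */
		int r;

		mutex_lock(&kvm->slots_lock);

		/*
		 * Snapshot and write-protect the dirty bitmap; *flush tells
		 * the caller whether a remote TLB flush is now required.
		 */
		r = kvm_get_dirty_log_protect(kvm, log, &flush);

		if (flush)
			kvm_flush_remote_tlbs(kvm);

		mutex_unlock(&kvm->slots_lock);
		return r;
	}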
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Xiangyou Xie <xiexiangyou@huawei.com>
Reviewed-by: Ying Fang <fangying1@huawei.com>
Reviewed-by: Zenghui Yu <yuzenghui@huawei.com>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 arch/mips/kvm/mips.c     | 6 +++---
 arch/x86/kvm/x86.c       | 6 +++---
 include/linux/kvm_host.h | 2 +-
 virt/kvm/arm/arm.c       | 6 +++---
 virt/kvm/kvm_main.c      | 6 +++---
 5 files changed, 13 insertions(+), 13 deletions(-)
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 79485790f7b5..a8331c358e34 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -1006,14 +1006,14 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
 {
 	struct kvm_memslots *slots;
 	struct kvm_memory_slot *memslot;
-	bool is_dirty = false;
+	bool flush = false;
 	int r;
 
 	mutex_lock(&kvm->slots_lock);
 
-	r = kvm_get_dirty_log_protect(kvm, log, &is_dirty);
+	r = kvm_get_dirty_log_protect(kvm, log, &flush);
 
-	if (is_dirty) {
+	if (flush) {
 		slots = kvm_memslots(kvm);
 		memslot = id_to_memslot(slots, log->slot);
 
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index e53040611337..02a62fc42a04 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4417,7 +4417,7 @@ static int kvm_vm_ioctl_reinject(struct kvm *kvm,
  */
 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
 {
-	bool is_dirty = false;
+	bool flush = false;
 	int r;
 
 	mutex_lock(&kvm->slots_lock);
@@ -4428,14 +4428,14 @@ int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
 	if (kvm_x86_ops->flush_log_dirty)
 		kvm_x86_ops->flush_log_dirty(kvm);
 
-	r = kvm_get_dirty_log_protect(kvm, log, &is_dirty);
+	r = kvm_get_dirty_log_protect(kvm, log, &flush);
 
 	/*
 	 * All the TLBs can be flushed out of mmu lock, see the comments in
 	 * kvm_mmu_slot_remove_write_access().
	 */
 	lockdep_assert_held(&kvm->slots_lock);
-	if (is_dirty)
+	if (flush)
 		kvm_flush_remote_tlbs(kvm);
 
 	mutex_unlock(&kvm->slots_lock);
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index 6d73855e0974..3b4c373b7c8a 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -812,7 +812,7 @@ int kvm_get_dirty_log(struct kvm *kvm,
 			struct kvm_dirty_log *log, int *is_dirty);
 
 int kvm_get_dirty_log_protect(struct kvm *kvm,
-			struct kvm_dirty_log *log, bool *is_dirty);
+			struct kvm_dirty_log *log, bool *flush);
 
 void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
 					struct kvm_memory_slot *slot,
diff --git a/virt/kvm/arm/arm.c b/virt/kvm/arm/arm.c
index 4a5765277646..ec7060ad909b 100644
--- a/virt/kvm/arm/arm.c
+++ b/virt/kvm/arm/arm.c
@@ -1297,14 +1297,14 @@ long kvm_arch_vcpu_ioctl(struct file *filp,
  */
 int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
 {
-	bool is_dirty = false;
+	bool flush = false;
 	int r;
 
 	mutex_lock(&kvm->slots_lock);
 
-	r = kvm_get_dirty_log_protect(kvm, log, &is_dirty);
+	r = kvm_get_dirty_log_protect(kvm, log, &flush);
 
-	if (is_dirty)
+	if (flush)
 		kvm_flush_remote_tlbs(kvm);
 
 	mutex_unlock(&kvm->slots_lock);
diff --git a/virt/kvm/kvm_main.c b/virt/kvm/kvm_main.c
index 5b4f4231292b..ced4277f1223 100644
--- a/virt/kvm/kvm_main.c
+++ b/virt/kvm/kvm_main.c
@@ -1214,7 +1214,7 @@ EXPORT_SYMBOL_GPL(kvm_get_dirty_log);
  *
  */
 int kvm_get_dirty_log_protect(struct kvm *kvm,
-			struct kvm_dirty_log *log, bool *is_dirty)
+			struct kvm_dirty_log *log, bool *flush)
 {
 	struct kvm_memslots *slots;
 	struct kvm_memory_slot *memslot;
@@ -1241,7 +1241,7 @@ int kvm_get_dirty_log_protect(struct kvm *kvm,
 	memset(dirty_bitmap_buffer, 0, n);
 
 	spin_lock(&kvm->mmu_lock);
-	*is_dirty = false;
+	*flush = false;
 	for (i = 0; i < n / sizeof(long); i++) {
 		unsigned long mask;
 		gfn_t offset;
@@ -1249,7 +1249,7 @@ int kvm_get_dirty_log_protect(struct kvm *kvm,
 		if (!dirty_bitmap[i])
 			continue;
 
-		*is_dirty = true;
+		*flush = true;
 
 		mask = xchg(&dirty_bitmap[i], 0);
 		dirty_bitmap_buffer[i] = mask;