From: Kai Huang <kai.huang@linux.intel.com>
[ Upstream commit 7b6f8a06e482960ba6ab06faba51c8f3727a5c7b ]
Move kvm_set_mmio_spte_mask() from x86.c to mmu.c as a prerequisite to fixing several SPTE reserved-bits calculation errors caused by MKTME; that fix requires kvm_set_mmio_spte_mask() to use a local static variable defined in mmu.c.

Also move the call site of kvm_set_mmio_spte_mask() from kvm_arch_init() to kvm_mmu_module_init() so that kvm_set_mmio_spte_mask() can be made static.
Reviewed-by: Sean Christopherson <sean.j.christopherson@intel.com>
Signed-off-by: Kai Huang <kai.huang@linux.intel.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Signed-off-by: Sasha Levin <sashal@kernel.org>
Signed-off-by: Yang Yingliang <yangyingliang@huawei.com>
---
 arch/x86/kvm/mmu.c | 31 +++++++++++++++++++++++++++++++
 arch/x86/kvm/x86.c | 31 -------------------------------
 2 files changed, 31 insertions(+), 31 deletions(-)
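For background on what the moved code does: an MMIO SPTE sets physical-address bit 51, which is reserved on any CPU with MAXPHYADDR < 52, together with the present bit, so a guest access to such a page faults with PFERR.RSV = 1 and KVM can recognize it as emulated MMIO without walking the memslots. Below is a minimal user-space sketch of the same mask computation, assuming nothing beyond libc; compute_mmio_mask() and the sample widths are invented for illustration and are not kernel code.

#include <stdint.h>
#include <stdio.h>

/*
 * Stand-in for the kernel's kvm_set_mmio_spte_mask(); only the bit
 * layout mirrors the function in the diff below.
 */
static uint64_t compute_mmio_mask(int maxphyaddr)
{
	uint64_t mask = 1ull << 51;	/* reserved while maxphyaddr < 52 */

	mask |= 1ull;			/* present bit */

	/*
	 * With 52 physical bits, bit 51 is a legal address bit, so the
	 * reserved-bit fault can never fire; drop the present bit to
	 * disable the MMIO fast path instead.
	 */
	if (maxphyaddr == 52)
		mask &= ~1ull;

	return mask;
}

int main(void)
{
	printf("maxphyaddr 46: %#llx\n",
	       (unsigned long long)compute_mmio_mask(46));
	printf("maxphyaddr 52: %#llx\n",
	       (unsigned long long)compute_mmio_mask(52));
	return 0;
}

On a 46-bit part this prints 0x8000000000001 (bit 51 plus the present bit); on a 52-bit part it prints 0x8000000000000, i.e. the installed value can no longer force an RSV fault.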
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 18632f15b29f..80fc5bd4a32a 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -6050,6 +6050,35 @@ static int set_nx_huge_pages(const char *val, const struct kernel_param *kp)
 	return 0;
 }
 
+static void kvm_set_mmio_spte_mask(void)
+{
+	u64 mask;
+	int maxphyaddr = boot_cpu_data.x86_phys_bits;
+
+	/*
+	 * Set the reserved bits and the present bit of an paging-structure
+	 * entry to generate page fault with PFER.RSV = 1.
+	 */
+
+	/*
+	 * Mask the uppermost physical address bit, which would be reserved as
+	 * long as the supported physical address width is less than 52.
+	 */
+	mask = 1ull << 51;
+
+	/* Set the present bit. */
+	mask |= 1ull;
+
+	/*
+	 * If reserved bit is not supported, clear the present bit to disable
+	 * mmio page fault.
+	 */
+	if (maxphyaddr == 52)
+		mask &= ~1ull;
+
+	kvm_mmu_set_mmio_spte_mask(mask, mask);
+}
+
 int kvm_mmu_module_init(void)
 {
 	int ret = -ENOMEM;
@@ -6059,6 +6088,8 @@ int kvm_mmu_module_init(void)
 
 	kvm_mmu_reset_all_pte_masks();
 
+	kvm_set_mmio_spte_mask();
+
 	pte_list_desc_cache = kmem_cache_create("pte_list_desc",
 					    sizeof(struct pte_list_desc),
 					    0, SLAB_ACCOUNT, NULL);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 0e6dfd3cb880..c43178e973ba 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -6768,35 +6768,6 @@ static struct perf_guest_info_callbacks kvm_guest_cbs = {
 	.get_guest_ip		= kvm_get_guest_ip,
 };
 
-static void kvm_set_mmio_spte_mask(void)
-{
-	u64 mask;
-	int maxphyaddr = boot_cpu_data.x86_phys_bits;
-
-	/*
-	 * Set the reserved bits and the present bit of an paging-structure
-	 * entry to generate page fault with PFER.RSV = 1.
-	 */
-
-	/*
-	 * Mask the uppermost physical address bit, which would be reserved as
-	 * long as the supported physical address width is less than 52.
-	 */
-	mask = 1ull << 51;
-
-	/* Set the present bit. */
-	mask |= 1ull;
-
-	/*
-	 * If reserved bit is not supported, clear the present bit to disable
-	 * mmio page fault.
-	 */
-	if (maxphyaddr == 52)
-		mask &= ~1ull;
-
-	kvm_mmu_set_mmio_spte_mask(mask, mask);
-}
-
 #ifdef CONFIG_X86_64
 static void pvclock_gtod_update_fn(struct work_struct *work)
 {
@@ -6874,8 +6845,6 @@ int kvm_arch_init(void *opaque)
 	if (r)
 		goto out_free_percpu;
 
-	kvm_set_mmio_spte_mask();
-
 	kvm_x86_ops = ops;
 
 	kvm_mmu_set_mask_ptes(PT_USER_MASK, PT_ACCESSED_MASK,