This reverts commit 8cc25436c41592b2236b4d2911a3031131a62872.
---
 arch/arm64/include/asm/kvm_asm.h | 15 ----------
 arch/arm64/kernel/vmlinux.lds.S  |  8 -----
 arch/arm64/kvm/hyp/entry.S       | 16 ++++------
 arch/arm64/kvm/hyp/hyp-entry.S   | 51 +++++++++++++------------------
 arch/arm64/kvm/hyp/switch.c      | 31 -------------------
 5 files changed, 26 insertions(+), 95 deletions(-)
diff --git a/arch/arm64/include/asm/kvm_asm.h b/arch/arm64/include/asm/kvm_asm.h
index 5df55a4dab42..400cb2af5ba6 100644
--- a/arch/arm64/include/asm/kvm_asm.h
+++ b/arch/arm64/include/asm/kvm_asm.h
@@ -146,21 +146,6 @@ extern u32 __kvm_get_mdcr_el2(void);
 	kern_hyp_va	\vcpu
 .endm
-/*
- * KVM extable for unexpected exceptions.
- * In the same format _asm_extable, but output to a different section so that
- * it can be mapped to EL2. The KVM version is not sorted. The caller must
- * ensure:
- * x18 has the hypervisor value to allow any Shadow-Call-Stack instrumented
- * code to write to it, and that SPSR_EL2 and ELR_EL2 are restored by the fixup.
- */
-.macro	_kvm_extable, from, to
-	.pushsection	__kvm_ex_table, "a"
-	.align		3
-	.long	(\from - .), (\to - .)
-	.popsection
-.endm
-

 #endif
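For context (illustrative, not part of this revert): each entry the removed
_kvm_extable macro emitted is a pair of 32-bit offsets relative to the entry's
own fields, in the same spirit as the regular kernel extable. A minimal C
sketch of how such an entry resolves back to absolute addresses (the struct
and helper names here are made up for illustration):

	/* Illustrative layout: two PC-relative 32-bit offsets per entry. */
	struct kvm_extable_entry {
		int insn;	/* offset from &insn to the faulting instruction */
		int fixup;	/* offset from &fixup to the fixup handler */
	};

	/* Resolve an entry to absolute addresses, as the reverted
	 * kvm_unexpected_el2_exception() below did via
	 * (unsigned long)&entry->insn + entry->insn. */
	static unsigned long extable_insn_addr(const struct kvm_extable_entry *e)
	{
		return (unsigned long)&e->insn + e->insn;
	}

	static unsigned long extable_fixup_addr(const struct kvm_extable_entry *e)
	{
		return (unsigned long)&e->fixup + e->fixup;
	}

The relative encoding keeps the table position-independent, which is what
allowed it to be mapped at EL2 without address fixups.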

 #endif	/* __ARM_KVM_ASM_H__ */
diff --git a/arch/arm64/kernel/vmlinux.lds.S b/arch/arm64/kernel/vmlinux.lds.S
index 69e7c8d4a00f..d6050c6e65bc 100644
--- a/arch/arm64/kernel/vmlinux.lds.S
+++ b/arch/arm64/kernel/vmlinux.lds.S
@@ -24,13 +24,6 @@ ENTRY(_text)
 jiffies = jiffies_64;
-
-#define HYPERVISOR_EXTABLE					\
-	. = ALIGN(SZ_8);					\
-	__start___kvm_ex_table = .;				\
-	*(__kvm_ex_table)					\
-	__stop___kvm_ex_table = .;
-
 #define HYPERVISOR_TEXT					\
 	/*						\
 	 * Align to 4 KB so that			\
@@ -46,7 +39,6 @@ jiffies = jiffies_64;
 	__hyp_idmap_text_end = .;			\
 	__hyp_text_start = .;				\
 	*(.hyp.text)					\
-	HYPERVISOR_EXTABLE				\
 	__hyp_text_end = .;
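A note on the removed linker fragment (aside, not part of the diff): the
ALIGN(SZ_8) pairs with the .align 3 in the removed _kvm_extable macro, so the
__start/__stop symbols delimited an array of naturally aligned entries that C
code could walk directly. A sketch of the usual consumption pattern (entry
type assumed; see the externs removed from switch.c further down):

	extern struct exception_table_entry __start___kvm_ex_table;
	extern struct exception_table_entry __stop___kvm_ex_table;

	/* Walk the table linearly; per the removed comment it is not
	 * sorted, so no bsearch as with the regular kernel extable. */
	for (entry = &__start___kvm_ex_table;
	     entry < &__stop___kvm_ex_table; entry++)
		/* ... match entry against the faulting PC ... */;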

 #define IDMAP_TEXT					\
diff --git a/arch/arm64/kvm/hyp/entry.S b/arch/arm64/kvm/hyp/entry.S
index 90e012fa3ca5..a0552b5177c5 100644
--- a/arch/arm64/kvm/hyp/entry.S
+++ b/arch/arm64/kvm/hyp/entry.S
@@ -164,22 +164,18 @@ alternative_endif
 	// This is our single instruction exception window. A pending
 	// SError is guaranteed to occur at the earliest when we unmask
 	// it, and at the latest just after the ISB.
+	.global	abort_guest_exit_start
 abort_guest_exit_start:

 	isb

+	.global	abort_guest_exit_end
 abort_guest_exit_end:
-	msr	daifset, #4	// Mask aborts
-	ret
-
-	_kvm_extable	abort_guest_exit_start, 9997f
-	_kvm_extable	abort_guest_exit_end, 9997f
-9997:
-	msr	daifset, #4	// Mask aborts
-	mov	x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)

-	// restore the EL1 exception context so that we can report some
-	// information. Merge the exception code with the SError pending bit.
+	// If the exception took place, restore the EL1 exception
+	// context so that we can report some information.
+	// Merge the exception code with the SError pending bit.
+	tbz	x0, #ARM_EXIT_WITH_SERROR_BIT, 1f
 	msr	elr_el2, x2
 	msr	esr_el2, x3
 	msr	spsr_el2, x4
diff --git a/arch/arm64/kvm/hyp/hyp-entry.S b/arch/arm64/kvm/hyp/hyp-entry.S
index c3e4ae84f3a4..71591a6ee63e 100644
--- a/arch/arm64/kvm/hyp/hyp-entry.S
+++ b/arch/arm64/kvm/hyp/hyp-entry.S
@@ -26,30 +26,6 @@
 #include <asm/kvm_mmu.h>
 #include <asm/mmu.h>
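In C-like terms, the flow this hunk restores is roughly the following sketch
(aside, not part of the diff; the saved_* names and the final merge step are
assumptions based on the "Merge the exception code" comment, since the tail of
the routine lies outside the hunk):

	/* x2-x4 held the exception context saved before PSTATE.A was
	 * unmasked; x0 is the return code coming back from el2_error. */
	if (ret & (1UL << ARM_EXIT_WITH_SERROR_BIT)) {
		write_sysreg(saved_elr, elr_el2);
		write_sysreg(saved_esr, esr_el2);
		write_sysreg(saved_spsr, spsr_el2);
		ret |= original_exit_code;	/* merge code with SError bit */
	}
	return ret;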

-.macro save_caller_saved_regs_vect
-	/* x0 and x1 were saved in the vector entry */
-	stp	x2, x3,   [sp, #-16]!
-	stp	x4, x5,   [sp, #-16]!
-	stp	x6, x7,   [sp, #-16]!
-	stp	x8, x9,   [sp, #-16]!
-	stp	x10, x11, [sp, #-16]!
-	stp	x12, x13, [sp, #-16]!
-	stp	x14, x15, [sp, #-16]!
-	stp	x16, x17, [sp, #-16]!
-.endm
-
-.macro restore_caller_saved_regs_vect
-	ldp	x16, x17, [sp], #16
-	ldp	x14, x15, [sp], #16
-	ldp	x12, x13, [sp], #16
-	ldp	x10, x11, [sp], #16
-	ldp	x8, x9,   [sp], #16
-	ldp	x6, x7,   [sp], #16
-	ldp	x4, x5,   [sp], #16
-	ldp	x2, x3,   [sp], #16
-	ldp	x0, x1,   [sp], #16
-.endm
-
 .text

 .pushsection	.hyp.text, "ax"
@@ -209,14 +185,27 @@ el2_sync:

 el2_error:
-	save_caller_saved_regs_vect
-	stp	x29, x30, [sp, #-16]!
-
-	bl	kvm_unexpected_el2_exception
-
-	ldp	x29, x30, [sp], #16
-	restore_caller_saved_regs_vect
+	ldp	x0, x1, [sp], #16
+
+	/*
+	 * Only two possibilities:
+	 * 1) Either we come from the exit path, having just unmasked
+	 *    PSTATE.A: change the return code to an EL2 fault, and
+	 *    carry on, as we're already in a sane state to handle it.
+	 * 2) Or we come from anywhere else, and that's a bug: we panic.
+	 *
+	 * For (1), x0 contains the original return code and x1 doesn't
+	 * contain anything meaningful at that stage. We can reuse them
+	 * as temp registers.
+	 * For (2), who cares?
+	 */
+	mrs	x0, elr_el2
+	adr	x1, abort_guest_exit_start
+	cmp	x0, x1
+	adr	x1, abort_guest_exit_end
+	ccmp	x0, x1, #4, ne
+	b.ne	__hyp_panic
+	mov	x0, #(1 << ARM_EXIT_WITH_SERROR_BIT)
 	eret
 	sb
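The cmp/ccmp pair above implements "panic unless ELR_EL2 is one of the two
window labels": the #4 immediate is the NZCV value (Z set) that ccmp installs
when the first compare already matched, so b.ne only branches when neither
address matched. A rough C equivalent (illustrative sketch only; taking the
address of the asm labels would need declarations in real code):

	unsigned long elr = read_sysreg(elr_el2);

	/* Only an SError taken on the isb window is expected here. */
	if (elr != (unsigned long)&abort_guest_exit_start &&
	    elr != (unsigned long)&abort_guest_exit_end)
		hyp_panic();	/* any other EL2 error is a bug */

	return 1UL << ARM_EXIT_WITH_SERROR_BIT;	/* signal pending SError */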
diff --git a/arch/arm64/kvm/hyp/switch.c b/arch/arm64/kvm/hyp/switch.c
index e9ea7cf3e98f..acd2d84b190c 100644
--- a/arch/arm64/kvm/hyp/switch.c
+++ b/arch/arm64/kvm/hyp/switch.c
@@ -25,7 +25,6 @@
 #include <asm/barrier.h>
 #include <asm/cpufeature.h>
-#include <asm/extable.h>
 #include <asm/kprobes.h>
 #include <asm/kvm_asm.h>
 #include <asm/kvm_emulate.h>
@@ -37,9 +36,6 @@
 #include <asm/processor.h>
 #include <asm/thread_info.h>

-extern struct exception_table_entry __start___kvm_ex_table;
-extern struct exception_table_entry __stop___kvm_ex_table;
-
 /* Check whether the FP regs were dirtied while in the host-side run loop: */
 static bool __hyp_text update_fp_enabled(struct kvm_vcpu *vcpu)
 {
@@ -730,30 +726,3 @@ void __hyp_text __noreturn hyp_panic(struct kvm_cpu_context *host_ctxt)
 	unreachable();
 }
-
-asmlinkage void __hyp_text kvm_unexpected_el2_exception(void)
-{
-	unsigned long addr, fixup;
-	struct kvm_cpu_context *host_ctxt;
-	struct exception_table_entry *entry, *end;
-	unsigned long elr_el2 = read_sysreg(elr_el2);
-
-	entry = hyp_symbol_addr(__start___kvm_ex_table);
-	end = hyp_symbol_addr(__stop___kvm_ex_table);
-	host_ctxt = __hyp_this_cpu_ptr(kvm_host_cpu_state);
-
-	while (entry < end) {
-		addr = (unsigned long)&entry->insn + entry->insn;
-		fixup = (unsigned long)&entry->fixup + entry->fixup;
-
-		if (addr != elr_el2) {
-			entry++;
-			continue;
-		}
-
-		write_sysreg(fixup, elr_el2);
-		return;
-	}
-
-	hyp_panic(host_ctxt);
-}