KVM: arm64: Simplify handling of ARCH_WORKAROUND_2
author Marc Zyngier <maz@kernel.org>
Fri, 18 Sep 2020 11:25:40 +0000 (12:25 +0100)
committer Will Deacon <will@kernel.org>
Tue, 29 Sep 2020 15:08:16 +0000 (16:08 +0100)
Owing to the fact that the host kernel is always mitigated, we can
drastically simplify the WA2 handling by keeping the mitigation
state ON when entering the guest. This means the guest is either
unaffected or silently mitigated.

This results in a nice simplification of the mitigation space,
and the removal of a lot of code that was never really used anyway.

Signed-off-by: Marc Zyngier <maz@kernel.org>
Signed-off-by: Will Deacon <will@kernel.org>
14 files changed:
arch/arm64/include/asm/kvm_asm.h
arch/arm64/include/asm/kvm_emulate.h
arch/arm64/include/asm/kvm_mmu.h
arch/arm64/include/uapi/asm/kvm.h
arch/arm64/kernel/cpu_errata.c
arch/arm64/kernel/image-vars.h
arch/arm64/kvm/arm.c
arch/arm64/kvm/hyp/hyp-entry.S
arch/arm64/kvm/hyp/include/hyp/switch.h
arch/arm64/kvm/hyp/nvhe/switch.c
arch/arm64/kvm/hyp/vhe/switch.c
arch/arm64/kvm/hypercalls.c
arch/arm64/kvm/psci.c
arch/arm64/kvm/reset.c

index e9378cc..abe02cf 100644 (file)
@@ -9,9 +9,6 @@
 
 #include <asm/virt.h>
 
-#define        VCPU_WORKAROUND_2_FLAG_SHIFT    0
-#define        VCPU_WORKAROUND_2_FLAG          (_AC(1, UL) << VCPU_WORKAROUND_2_FLAG_SHIFT)
-
 #define ARM_EXIT_WITH_SERROR_BIT  31
 #define ARM_EXCEPTION_CODE(x)    ((x) & ~(1U << ARM_EXIT_WITH_SERROR_BIT))
 #define ARM_EXCEPTION_IS_TRAP(x)  (ARM_EXCEPTION_CODE((x)) == ARM_EXCEPTION_TRAP)
index 49a55be..96eccb1 100644 (file)
@@ -383,20 +383,6 @@ static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
        return vcpu_read_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
 }
 
-static inline bool kvm_arm_get_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu)
-{
-       return vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG;
-}
-
-static inline void kvm_arm_set_vcpu_workaround_2_flag(struct kvm_vcpu *vcpu,
-                                                     bool flag)
-{
-       if (flag)
-               vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
-       else
-               vcpu->arch.workaround_flags &= ~VCPU_WORKAROUND_2_FLAG;
-}
-
 static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
 {
        if (vcpu_mode_is_32bit(vcpu)) {
index 873e124..36606ef 100644 (file)
@@ -526,23 +526,6 @@ static inline int kvm_map_vectors(void)
 }
 #endif
 
-DECLARE_PER_CPU_READ_MOSTLY(u64, arm64_ssbd_callback_required);
-
-static inline int hyp_map_aux_data(void)
-{
-       int cpu, err;
-
-       for_each_possible_cpu(cpu) {
-               u64 *ptr;
-
-               ptr = per_cpu_ptr(&arm64_ssbd_callback_required, cpu);
-               err = create_hyp_mappings(ptr, ptr + 1, PAGE_HYP);
-               if (err)
-                       return err;
-       }
-       return 0;
-}
-
 #define kvm_phys_to_vttbr(addr)                phys_to_ttbr(addr)
 
 /*
index ba85bb2..7d804fd 100644 (file)
@@ -242,6 +242,15 @@ struct kvm_vcpu_events {
 #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL          0
 #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_AVAIL              1
 #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_REQUIRED       2
+
+/*
+ * Only two states can be presented by the host kernel:
+ * - NOT_REQUIRED: the guest doesn't need to do anything
+ * - NOT_AVAIL: the guest isn't mitigated (it can still use SSBS if available)
+ *
+ * All the other values are deprecated. The host still accepts all
+ * values (they are ABI), but will narrow them to the above two.
+ */
 #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2    KVM_REG_ARM_FW_REG(2)
 #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL          0
 #define KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN            1
index 7fc54c3..7e9caef 100644 (file)
@@ -108,20 +108,6 @@ cpu_enable_trap_ctr_access(const struct arm64_cpu_capabilities *cap)
 
 int ssbd_state __read_mostly = ARM64_SSBD_UNKNOWN;
 
-void __init arm64_enable_wa2_handling(struct alt_instr *alt,
-                                     __le32 *origptr, __le32 *updptr,
-                                     int nr_inst)
-{
-       BUG_ON(nr_inst != 1);
-       /*
-        * Only allow mitigation on EL1 entry/exit and guest
-        * ARCH_WORKAROUND_2 handling if the SSBD state allows it to
-        * be flipped.
-        */
-       if (arm64_get_ssbd_state() == ARM64_SSBD_KERNEL)
-               *updptr = cpu_to_le32(aarch64_insn_gen_nop());
-}
-
 #ifdef CONFIG_ARM64_ERRATUM_1463225
 DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);
 
index 8982b68..d0f3f35 100644 (file)
@@ -64,12 +64,10 @@ __efistub__ctype            = _ctype;
 #define KVM_NVHE_ALIAS(sym) __kvm_nvhe_##sym = sym;
 
 /* Alternative callbacks for init-time patching of nVHE hyp code. */
-KVM_NVHE_ALIAS(arm64_enable_wa2_handling);
 KVM_NVHE_ALIAS(kvm_patch_vector_branch);
 KVM_NVHE_ALIAS(kvm_update_va_mask);
 
 /* Global kernel state accessed by nVHE hyp code. */
-KVM_NVHE_ALIAS(arm64_ssbd_callback_required);
 KVM_NVHE_ALIAS(kvm_host_data);
 KVM_NVHE_ALIAS(kvm_vgic_global_state);
 
index 46dc3d7..b1bd2f4 100644 (file)
@@ -1549,10 +1549,6 @@ static int init_hyp_mode(void)
                }
        }
 
-       err = hyp_map_aux_data();
-       if (err)
-               kvm_err("Cannot map host auxiliary data: %d\n", err);
-
        return 0;
 
 out_err:
index db2dd75..3cca756 100644 (file)
@@ -116,33 +116,6 @@ el1_hvc_guest:
                          ARM_SMCCC_ARCH_WORKAROUND_2)
        cbnz    w1, el1_trap
 
-alternative_cb arm64_enable_wa2_handling
-       b       wa2_end
-alternative_cb_end
-       get_vcpu_ptr    x2, x0
-       ldr     x0, [x2, #VCPU_WORKAROUND_FLAGS]
-
-       // Sanitize the argument and update the guest flags
-       ldr     x1, [sp, #8]                    // Guest's x1
-       clz     w1, w1                          // Murphy's device:
-       lsr     w1, w1, #5                      // w1 = !!w1 without using
-       eor     w1, w1, #1                      // the flags...
-       bfi     x0, x1, #VCPU_WORKAROUND_2_FLAG_SHIFT, #1
-       str     x0, [x2, #VCPU_WORKAROUND_FLAGS]
-
-       /* Check that we actually need to perform the call */
-       hyp_ldr_this_cpu x0, arm64_ssbd_callback_required, x2
-       cbz     x0, wa2_end
-
-       mov     w0, #ARM_SMCCC_ARCH_WORKAROUND_2
-       smc     #0
-
-       /* Don't leak data from the SMC call */
-       mov     x3, xzr
-wa2_end:
-       mov     x2, xzr
-       mov     x1, xzr
-
 wa_epilogue:
        mov     x0, xzr
        add     sp, sp, #16
index 5dec558..a684082 100644 (file)
@@ -479,35 +479,6 @@ exit:
        return false;
 }
 
-static inline bool __needs_ssbd_off(struct kvm_vcpu *vcpu)
-{
-       if (!cpus_have_final_cap(ARM64_SPECTRE_V4))
-               return false;
-
-       return !(vcpu->arch.workaround_flags & VCPU_WORKAROUND_2_FLAG);
-}
-
-static inline void __set_guest_arch_workaround_state(struct kvm_vcpu *vcpu)
-{
-       /*
-        * The host runs with the workaround always present. If the
-        * guest wants it disabled, so be it...
-        */
-       if (__needs_ssbd_off(vcpu) &&
-           __hyp_this_cpu_read(arm64_ssbd_callback_required))
-               arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 0, NULL);
-}
-
-static inline void __set_host_arch_workaround_state(struct kvm_vcpu *vcpu)
-{
-       /*
-        * If the guest has disabled the workaround, bring it back on.
-        */
-       if (__needs_ssbd_off(vcpu) &&
-           __hyp_this_cpu_read(arm64_ssbd_callback_required))
-               arm_smccc_1_1_smc(ARM_SMCCC_ARCH_WORKAROUND_2, 1, NULL);
-}
-
 static inline void __kvm_unexpected_el2_exception(void)
 {
        unsigned long addr, fixup;
index 0970442..8d3dd4f 100644 (file)
@@ -202,8 +202,6 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 
        __debug_switch_to_guest(vcpu);
 
-       __set_guest_arch_workaround_state(vcpu);
-
        do {
                /* Jump in the fire! */
                exit_code = __guest_enter(vcpu, host_ctxt);
@@ -211,8 +209,6 @@ int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
                /* And we're baaack! */
        } while (fixup_guest_exit(vcpu, &exit_code));
 
-       __set_host_arch_workaround_state(vcpu);
-
        __sysreg_save_state_nvhe(guest_ctxt);
        __sysreg32_save_state(vcpu);
        __timer_disable_traps(vcpu);
index c1da4f8..ecf67e6 100644 (file)
@@ -131,8 +131,6 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
        sysreg_restore_guest_state_vhe(guest_ctxt);
        __debug_switch_to_guest(vcpu);
 
-       __set_guest_arch_workaround_state(vcpu);
-
        do {
                /* Jump in the fire! */
                exit_code = __guest_enter(vcpu, host_ctxt);
@@ -140,8 +138,6 @@ static int __kvm_vcpu_run_vhe(struct kvm_vcpu *vcpu)
                /* And we're baaack! */
        } while (fixup_guest_exit(vcpu, &exit_code));
 
-       __set_host_arch_workaround_state(vcpu);
-
        sysreg_save_guest_state_vhe(guest_ctxt);
 
        __deactivate_traps(vcpu);
index 413d46b..69e023d 100644 (file)
@@ -36,15 +36,13 @@ int kvm_hvc_call_handler(struct kvm_vcpu *vcpu)
                        }
                        break;
                case ARM_SMCCC_ARCH_WORKAROUND_2:
-                       switch (kvm_arm_have_ssbd()) {
-                       case KVM_SSBD_FORCE_DISABLE:
-                       case KVM_SSBD_UNKNOWN:
+                       switch (arm64_get_ssbd_state()) {
+                       case ARM64_SSBD_FORCE_DISABLE:
+                       case ARM64_SSBD_UNKNOWN:
                                break;
-                       case KVM_SSBD_KERNEL:
-                               val = SMCCC_RET_SUCCESS;
-                               break;
-                       case KVM_SSBD_FORCE_ENABLE:
-                       case KVM_SSBD_MITIGATED:
+                       case ARM64_SSBD_KERNEL:
+                       case ARM64_SSBD_FORCE_ENABLE:
+                       case ARM64_SSBD_MITIGATED:
                                val = SMCCC_RET_NOT_REQUIRED;
                                break;
                        }
index fbdd6f3..87e6e38 100644 (file)
@@ -435,17 +435,15 @@ static int get_kernel_wa_level(u64 regid)
                }
                return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1_NOT_AVAIL;
        case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
-               switch (kvm_arm_have_ssbd()) {
-               case KVM_SSBD_FORCE_DISABLE:
-                       return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
-               case KVM_SSBD_KERNEL:
-                       return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL;
-               case KVM_SSBD_FORCE_ENABLE:
-               case KVM_SSBD_MITIGATED:
+               switch (arm64_get_ssbd_state()) {
+               case ARM64_SSBD_FORCE_ENABLE:
+               case ARM64_SSBD_MITIGATED:
+               case ARM64_SSBD_KERNEL:
                        return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED;
-               case KVM_SSBD_UNKNOWN:
+               case ARM64_SSBD_UNKNOWN:
+               case ARM64_SSBD_FORCE_DISABLE:
                default:
-                       return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN;
+                       return KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
                }
        }
 
@@ -462,14 +460,8 @@ int kvm_arm_get_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
                val = kvm_psci_version(vcpu, vcpu->kvm);
                break;
        case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_1:
-               val = get_kernel_wa_level(reg->id) & KVM_REG_FEATURE_LEVEL_MASK;
-               break;
        case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2:
                val = get_kernel_wa_level(reg->id) & KVM_REG_FEATURE_LEVEL_MASK;
-
-               if (val == KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL &&
-                   kvm_arm_get_vcpu_workaround_2_flag(vcpu))
-                       val |= KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED;
                break;
        default:
                return -ENOENT;
@@ -527,34 +519,35 @@ int kvm_arm_set_fw_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
                            KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED))
                        return -EINVAL;
 
-               wa_level = val & KVM_REG_FEATURE_LEVEL_MASK;
-
-               if (get_kernel_wa_level(reg->id) < wa_level)
-                       return -EINVAL;
-
                /* The enabled bit must not be set unless the level is AVAIL. */
-               if (wa_level != KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL &&
-                   wa_level != val)
+               if ((val & KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED) &&
+                   (val & KVM_REG_FEATURE_LEVEL_MASK) != KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL)
                        return -EINVAL;
 
-               /* Are we finished or do we need to check the enable bit ? */
-               if (kvm_arm_have_ssbd() != KVM_SSBD_KERNEL)
-                       return 0;
-
                /*
-                * If this kernel supports the workaround to be switched on
-                * or off, make sure it matches the requested setting.
+                * Map all the possible incoming states to the only two we
+                * really want to deal with.
                 */
-               switch (wa_level) {
-               case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL:
-                       kvm_arm_set_vcpu_workaround_2_flag(vcpu,
-                           val & KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_ENABLED);
+               switch (val & KVM_REG_FEATURE_LEVEL_MASK) {
+               case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL:
+               case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_UNKNOWN:
+                       wa_level = KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_AVAIL;
                        break;
+               case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_AVAIL:
                case KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED:
-                       kvm_arm_set_vcpu_workaround_2_flag(vcpu, true);
+                       wa_level = KVM_REG_ARM_SMCCC_ARCH_WORKAROUND_2_NOT_REQUIRED;
                        break;
+               default:
+                       return -EINVAL;
                }
 
+               /*
+                * We can deal with NOT_AVAIL on NOT_REQUIRED, but not the
+                * other way around.
+                */
+               if (get_kernel_wa_level(reg->id) < wa_level)
+                       return -EINVAL;
+
                return 0;
        default:
                return -ENOENT;
index ee33875..f6e8b4a 100644 (file)
@@ -319,10 +319,6 @@ int kvm_reset_vcpu(struct kvm_vcpu *vcpu)
                vcpu->arch.reset_state.reset = false;
        }
 
-       /* Default workaround setup is enabled (if supported) */
-       if (kvm_arm_have_ssbd() == KVM_SSBD_KERNEL)
-               vcpu->arch.workaround_flags |= VCPU_WORKAROUND_2_FLAG;
-
        /* Reset timer */
        ret = kvm_timer_vcpu_reset(vcpu);
 out: