arm64: KVM: Allow mapping of vectors outside of the RAM region
author     Marc Zyngier <marc.zyngier@arm.com>
           Thu, 15 Feb 2018 11:47:14 +0000
committer  Marc Zyngier <marc.zyngier@arm.com>
           Mon, 19 Mar 2018 13:06:46 +0000
We're now ready to map our vectors in weird and wonderful locations.
On enabling ARM64_HARDEN_EL2_VECTORS, a vector slot gets allocated
(if this hasn't already been done via ARM64_HARDEN_BRANCH_PREDICTOR)
and gets mapped outside of the normal RAM region, next to the
idmap.

That way, being able to obtain VBAR_EL2 doesn't reveal the mapping
of the rest of the hypervisor code.

Acked-by: Catalin Marinas <catalin.marinas@arm.com>
Signed-off-by: Marc Zyngier <marc.zyngier@arm.com>
Documentation/arm64/memory.txt
arch/arm64/Kconfig
arch/arm64/include/asm/kvm_mmu.h
arch/arm64/include/asm/mmu.h
arch/arm64/kvm/Kconfig
arch/arm64/kvm/va_layout.c

diff --git a/Documentation/arm64/memory.txt b/Documentation/arm64/memory.txt
index c58cc5d..c5dab30 100644
@@ -90,7 +90,8 @@ When using KVM without the Virtualization Host Extensions, the
 hypervisor maps kernel pages in EL2 at a fixed (and potentially
 random) offset from the linear mapping. See the kern_hyp_va macro and
 kvm_update_va_mask function for more details. MMIO devices such as
-GICv2 gets mapped next to the HYP idmap page.
+GICv2 get mapped next to the HYP idmap page, as do vectors when
+ARM64_HARDEN_EL2_VECTORS is selected for particular CPUs.
 
 When using KVM with the Virtualization Host Extensions, no additional
 mappings are created, since the host kernel runs directly in EL2.
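
To make the kern_hyp_va reference above concrete, here is a toy
user-space model of the transform (a sketch only: the real macro is
patched at boot by kvm_update_va_mask, and the mask width and tag
value below are invented):

  #include <stdio.h>

  /*
   * Toy kern_hyp_va: keep the low bits of the kernel VA and replace
   * the high bits with a (possibly random) HYP region tag.
   */
  #define HYP_VA_MASK	((1UL << 47) - 1)

  static unsigned long hyp_va_tag = 0x1de7UL << 48;	/* assumed random */

  static unsigned long kern_hyp_va_model(unsigned long kern_va)
  {
          return (kern_va & HYP_VA_MASK) | hyp_va_tag;
  }

  int main(void)
  {
          unsigned long kva = 0xffff800010080000UL;

          printf("kernel VA %#lx -> HYP VA %#lx\n",
                 kva, kern_hyp_va_model(kva));
          return 0;
  }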
diff --git a/arch/arm64/Kconfig b/arch/arm64/Kconfig
index 7381eeb..48ad7ca 100644
@@ -904,6 +904,22 @@ config HARDEN_BRANCH_PREDICTOR
 
          If unsure, say Y.
 
+config HARDEN_EL2_VECTORS
+       bool "Harden EL2 vector mapping against system register leak" if EXPERT
+       default y
+       help
+         Speculation attacks against some high-performance processors can
+         be used to leak privileged information such as the vector base
+         register, resulting in a potential defeat of the EL2 layout
+         randomization.
+
+         This config option will map the vectors to a fixed location,
+         independent of the EL2 code mapping, so that revealing VBAR_EL2
+         to an attacker does not give away any extra information. This
+         only gets enabled on affected CPUs.
+
+         If unsure, say Y.
+
 menuconfig ARMV8_DEPRECATED
        bool "Emulate deprecated/obsolete ARMv8 instructions"
        depends on COMPAT
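
The leak the help text above refers to comes down to a few lines of
arithmetic: when the vectors live inside the regular HYP text mapping,
one leaked register plus a link-time constant pins down the whole EL2
code mapping. All values below are invented, for illustration only:

  #include <stdio.h>

  int main(void)
  {
          unsigned long leaked_vbar_el2 = 0xa1b2c3d40800UL; /* side channel */
          unsigned long vectors_off_in_text = 0x800UL;      /* from vmlinux */

          /* Without HARDEN_EL2_VECTORS, this recovers the mapping: */
          printf("EL2 text base: %#lx\n",
                 leaked_vbar_el2 - vectors_off_in_text);
          /*
           * With it, VBAR_EL2 points next to the idmap instead, and
           * this subtraction says nothing about the EL2 code mapping.
           */
          return 0;
  }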
diff --git a/arch/arm64/include/asm/kvm_mmu.h b/arch/arm64/include/asm/kvm_mmu.h
index eb04437..0821109 100644
@@ -360,31 +360,91 @@ static inline unsigned int kvm_get_vmid_bits(void)
        return (cpuid_feature_extract_unsigned_field(reg, ID_AA64MMFR1_VMIDBITS_SHIFT) == 2) ? 16 : 8;
 }
 
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+#ifdef CONFIG_KVM_INDIRECT_VECTORS
+/*
+ * EL2 vectors can be mapped and rerouted in a number of ways,
+ * depending on the kernel configuration and CPU present:
+ *
+ * - If the CPU has the ARM64_HARDEN_BRANCH_PREDICTOR cap, the
+ *   hardening sequence is placed in one of the vector slots, which is
+ *   executed before jumping to the real vectors.
+ *
+ * - If the CPU has both the ARM64_HARDEN_EL2_VECTORS cap and the
+ *   ARM64_HARDEN_BRANCH_PREDICTOR cap, the slot containing the
+ *   hardening sequence is mapped next to the idmap page, and executed
+ *   before jumping to the real vectors.
+ *
+ * - If the CPU only has the ARM64_HARDEN_EL2_VECTORS cap, then an
+ *   empty slot is selected, mapped next to the idmap page, and
+ *   executed before jumping to the real vectors.
+ *
+ * Note that ARM64_HARDEN_EL2_VECTORS is somewhat incompatible with
+ * VHE, as we don't have hypervisor-specific mappings. If the system
+ * is VHE and yet selects this capability, it will be ignored.
+ */
 #include <asm/mmu.h>
 
+extern void *__kvm_bp_vect_base;
+extern int __kvm_harden_el2_vector_slot;
+
 static inline void *kvm_get_hyp_vector(void)
 {
        struct bp_hardening_data *data = arm64_get_bp_hardening_data();
-       void *vect = kvm_ksym_ref(__kvm_hyp_vector);
+       void *vect = kern_hyp_va(kvm_ksym_ref(__kvm_hyp_vector));
+       int slot = -1;
 
-       if (data->fn) {
-               vect = __bp_harden_hyp_vecs_start +
-                      data->hyp_vectors_slot * SZ_2K;
+       if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR) && data->fn) {
+               vect = kern_hyp_va(kvm_ksym_ref(__bp_harden_hyp_vecs_start));
+               slot = data->hyp_vectors_slot;
+       }
 
-               if (!has_vhe())
-                       vect = lm_alias(vect);
+       if (this_cpu_has_cap(ARM64_HARDEN_EL2_VECTORS) && !has_vhe()) {
+               vect = __kvm_bp_vect_base;
+               if (slot == -1)
+                       slot = __kvm_harden_el2_vector_slot;
        }
 
-       vect = kern_hyp_va(vect);
+       if (slot != -1)
+               vect += slot * SZ_2K;
+
        return vect;
 }
 
+/* This is only called on a !VHE system */
 static inline int kvm_map_vectors(void)
 {
+       /*
+        * HBP  = ARM64_HARDEN_BRANCH_PREDICTOR
+        * HEL2 = ARM64_HARDEN_EL2_VECTORS
+        *
+        * !HBP + !HEL2 -> use direct vectors
+        *  HBP + !HEL2 -> use hardened vectors in place
+        * !HBP +  HEL2 -> allocate one vector slot and use exec mapping
+        *  HBP +  HEL2 -> use hardened vectors and use exec mapping
+        */
+       if (cpus_have_const_cap(ARM64_HARDEN_BRANCH_PREDICTOR)) {
+               __kvm_bp_vect_base = kvm_ksym_ref(__bp_harden_hyp_vecs_start);
+               __kvm_bp_vect_base = kern_hyp_va(__kvm_bp_vect_base);
+       }
+
+       if (cpus_have_const_cap(ARM64_HARDEN_EL2_VECTORS)) {
+               phys_addr_t vect_pa = __pa_symbol(__bp_harden_hyp_vecs_start);
+               unsigned long size = (__bp_harden_hyp_vecs_end -
+                                     __bp_harden_hyp_vecs_start);
+
+               /*
+                * Always allocate a spare vector slot, as we don't
+                * know yet which CPUs have a BP hardening slot that
+                * we can reuse.
+                */
+               __kvm_harden_el2_vector_slot = atomic_inc_return(&arm64_el2_vector_last_slot);
+               BUG_ON(__kvm_harden_el2_vector_slot >= BP_HARDEN_EL2_SLOTS);
+               return create_hyp_exec_mappings(vect_pa, size,
+                                               &__kvm_bp_vect_base);
+       }
+
        return 0;
 }
-
 #else
 static inline void *kvm_get_hyp_vector(void)
 {
diff --git a/arch/arm64/include/asm/mmu.h b/arch/arm64/include/asm/mmu.h
index 3baf010..dd320df 100644
@@ -51,10 +51,13 @@ struct bp_hardening_data {
        bp_hardening_cb_t       fn;
 };
 
-#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
+#if (defined(CONFIG_HARDEN_BRANCH_PREDICTOR) ||        \
+     defined(CONFIG_HARDEN_EL2_VECTORS))
 extern char __bp_harden_hyp_vecs_start[], __bp_harden_hyp_vecs_end[];
 extern atomic_t arm64_el2_vector_last_slot;
+#endif  /* CONFIG_HARDEN_BRANCH_PREDICTOR || CONFIG_HARDEN_EL2_VECTORS */
 
+#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
 DECLARE_PER_CPU_READ_MOSTLY(struct bp_hardening_data, bp_hardening_data);
 
 static inline struct bp_hardening_data *arm64_get_bp_hardening_data(void)
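
The 2KB slot granule used throughout (vect += slot * SZ_2K above)
follows from the AArch64 vector table format; a quick sanity check,
assuming the architectural 16 entries of 128 bytes each:

  #include <assert.h>

  int main(void)
  {
          /* One AArch64 exception vector table: 16 x 128 bytes. */
          enum { ENTRIES = 16, ENTRY_BYTES = 128 };

          assert(ENTRIES * ENTRY_BYTES == 2048);  /* == SZ_2K per slot */
          return 0;
  }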
diff --git a/arch/arm64/kvm/Kconfig b/arch/arm64/kvm/Kconfig
index bd8cc03..a2e3a5a 100644
@@ -58,7 +58,7 @@ config KVM_ARM_PMU
          virtual machines.
 
 config KVM_INDIRECT_VECTORS
-       def_bool KVM && HARDEN_BRANCH_PREDICTOR
+       def_bool KVM && (HARDEN_BRANCH_PREDICTOR || HARDEN_EL2_VECTORS)
 
 source drivers/vhost/Kconfig
 
diff --git a/arch/arm64/kvm/va_layout.c b/arch/arm64/kvm/va_layout.c
index 2deb6e9..c712a73 100644
@@ -151,6 +151,9 @@ void __init kvm_update_va_mask(struct alt_instr *alt,
        }
 }
 
+void *__kvm_bp_vect_base;
+int __kvm_harden_el2_vector_slot;
+
 void kvm_patch_vector_branch(struct alt_instr *alt,
                             __le32 *origptr, __le32 *updptr, int nr_inst)
 {
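
create_hyp_exec_mappings, used by kvm_map_vectors above but not part
of this patch, can be thought of as a bump allocator handing out
executable HYP VAs just below the idmap. A rough user-space model (the
names, start address and downward growth are assumptions, not the real
implementation):

  #include <stdio.h>

  #define PAGE_SIZE	4096UL
  #define PAGE_ALIGN(x)	(((x) + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1))

  static unsigned long hyp_idmap_start = 0x40080000UL;	/* invented */
  static unsigned long io_map_base;			/* grows down */

  static int create_hyp_exec_mappings_model(unsigned long phys,
                                            unsigned long size,
                                            unsigned long *haddr)
  {
          unsigned long base;

          if (!io_map_base)
                  io_map_base = hyp_idmap_start;

          base = io_map_base - PAGE_ALIGN(size);
          /*
           * The real code would map [phys, phys + size) at base here,
           * with an executable HYP stage-1 permission.
           */
          io_map_base = base;
          *haddr = base + (phys & (PAGE_SIZE - 1));
          return 0;
  }

  int main(void)
  {
          unsigned long va;

          create_hyp_exec_mappings_model(0x80001000UL, 4 * 2048, &va);
          printf("vectors at %#lx, just below idmap %#lx\n",
                 va, hyp_idmap_start);
          return 0;
  }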