--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
	int (*set_identity_map_addr)(struct kvm *kvm, u64 ident_addr);
	int (*get_tdp_level)(struct kvm_vcpu *vcpu);
	u64 (*get_mt_mask)(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio);
-	int (*get_lpage_level)(void);
	void (*set_tdp_cr3)(struct kvm_vcpu *vcpu, unsigned long cr3);
void kvm_mmu_invpcid_gva(struct kvm_vcpu *vcpu, gva_t gva, unsigned long pcid);
void kvm_mmu_new_cr3(struct kvm_vcpu *vcpu, gpa_t new_cr3, bool skip_tlb_flush);
-void kvm_configure_mmu(bool enable_tdp);
+void kvm_configure_mmu(bool enable_tdp, int tdp_page_level);
static inline gpa_t translate_gpa(struct kvm_vcpu *vcpu, gpa_t gpa, u32 access,
				  struct x86_exception *exception)
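Net effect of the header change: the fault path's indirect call through kvm_x86_ops->get_lpage_level() is replaced by a value the vendor module hands to kvm_configure_mmu() once during setup, which is also what allows the hook to be dropped from kvm_x86_ops. A minimal standalone sketch of the before/after shapes; every name suffixed _mock is invented for illustration, and only PT_PDPE_LEVEL's value matches KVM:

#include <stdio.h>

#define PT_PDPE_LEVEL 3	/* 1GB; numeric value matches KVM's constant */

/* Old shape: an indirect call through the vendor ops table on every use. */
struct kvm_x86_ops_mock {
	int (*get_lpage_level)(void);
};

static int svm_get_lpage_level_mock(void)
{
	return PT_PDPE_LEVEL;
}

static struct kvm_x86_ops_mock ops_mock = {
	.get_lpage_level = svm_get_lpage_level_mock,
};

/* New shape: the vendor module reports the level once, at setup time. */
static int max_page_level_mock;

static void kvm_configure_mmu_mock(int tdp_page_level)
{
	max_page_level_mock = tdp_page_level;
}

int main(void)
{
	kvm_configure_mmu_mock(PT_PDPE_LEVEL);
	printf("indirect: %d, cached: %d\n",
	       ops_mock.get_lpage_level(), max_page_level_mock);
	return 0;
}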
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
 */
bool tdp_enabled = false;
+static int max_page_level __read_mostly;
+
enum {
	AUDIT_PRE_PAGE_FAULT,
	AUDIT_POST_PAGE_FAULT,
	if (!slot)
		return PT_PAGE_TABLE_LEVEL;
-	max_level = min(max_level, kvm_x86_ops->get_lpage_level());
+	max_level = min(max_level, max_page_level);
	for ( ; max_level > PT_PAGE_TABLE_LEVEL; max_level--) {
		linfo = lpage_info_slot(gfn, slot, max_level);
		if (!linfo->disallow_lpage)
}
EXPORT_SYMBOL_GPL(kvm_mmu_invpcid_gva);
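For illustration, a standalone sketch of the clamp above: the requested max_level is reduced to the cached max_page_level before the per-gfn disallow_lpage scan starts, so lpage_info_slot() is never consulted for a level the hardware cannot map. This is not kernel code; min_int() stands in for the kernel's min(), and only the PT_* values match KVM's constants:

/*
 * Standalone illustration, not kernel code: min_int() stands in for the
 * kernel's min(); the PT_* values mirror KVM's level constants.
 */
#include <stdio.h>

#define PT_PAGE_TABLE_LEVEL	1	/* 4KB */
#define PT_DIRECTORY_LEVEL	2	/* 2MB */
#define PT_PDPE_LEVEL		3	/* 1GB */

static int min_int(int a, int b)
{
	return a < b ? a : b;
}

int main(void)
{
	int max_page_level = PT_DIRECTORY_LEVEL;	/* hw without 1GB pages */
	int max_level = PT_PDPE_LEVEL;			/* caller wants 1GB */

	/* The clamp runs before the disallow_lpage walk ever starts. */
	max_level = min_int(max_level, max_page_level);
	printf("effective max_level = %d (PT_DIRECTORY_LEVEL)\n", max_level);
	return 0;
}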
-void kvm_configure_mmu(bool enable_tdp)
+void kvm_configure_mmu(bool enable_tdp, int tdp_page_level)
{
	tdp_enabled = enable_tdp;
+
+	/*
+	 * max_page_level reflects the capabilities of KVM's MMU irrespective
+	 * of kernel support, e.g. KVM may be capable of using 1GB pages when
+	 * the kernel is not. But, KVM never creates a page size greater than
+	 * what is used by the kernel for any given HVA, i.e. the kernel's
+	 * capabilities are ultimately consulted by kvm_mmu_hugepage_adjust().
+	 */
+	if (tdp_enabled)
+		max_page_level = tdp_page_level;
+	else if (boot_cpu_has(X86_FEATURE_GBPAGES))
+		max_page_level = PT_PDPE_LEVEL;
+	else
+		max_page_level = PT_DIRECTORY_LEVEL;
}
EXPORT_SYMBOL_GPL(kvm_configure_mmu);
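A standalone sketch of the selection policy just added: with TDP the backend's probed level wins, while with shadow paging the host's own paging capabilities decide, per the comment above. This is not kernel code; host_has_gbpages stands in for boot_cpu_has(X86_FEATURE_GBPAGES):

/*
 * Standalone sketch, not kernel code: host_has_gbpages stands in for
 * boot_cpu_has(X86_FEATURE_GBPAGES); the PT_* values match KVM's.
 */
#include <stdbool.h>
#include <stdio.h>

#define PT_DIRECTORY_LEVEL	2	/* 2MB */
#define PT_PDPE_LEVEL		3	/* 1GB */

static int pick_max_page_level(bool tdp_enabled, int tdp_page_level,
			       bool host_has_gbpages)
{
	if (tdp_enabled)
		return tdp_page_level;		/* trust the backend's probe */
	else if (host_has_gbpages)
		return PT_PDPE_LEVEL;		/* shadow paging follows the host */
	else
		return PT_DIRECTORY_LEVEL;
}

int main(void)
{
	printf("tdp, backend says 1GB: %d\n",
	       pick_max_page_level(true, PT_PDPE_LEVEL, false));
	printf("shadow, no gbpages:    %d\n",
	       pick_max_page_level(false, 0, false));
	return 0;
}

Caching the decision once at setup keeps this policy branch off the page fault path entirely.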
--- a/arch/x86/kvm/svm.c
+++ b/arch/x86/kvm/svm.c
	if (npt_enabled && !npt)
		npt_enabled = false;
-	kvm_configure_mmu(npt_enabled);
+	kvm_configure_mmu(npt_enabled, PT_PDPE_LEVEL);
	pr_info("kvm: Nested Paging %sabled\n", npt_enabled ? "en" : "dis");
	if (nrips) {
}
}
-static int svm_get_lpage_level(void)
-{
-	return PT_PDPE_LEVEL;
-}
-
static bool svm_has_wbinvd_exit(void)
{
	return true;
	.get_exit_info = svm_get_exit_info,
-	.get_lpage_level = svm_get_lpage_level,
-
	.cpuid_update = svm_cpuid_update,
	.set_supported_cpuid = svm_set_supported_cpuid,
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
	return (cache << VMX_EPT_MT_EPTE_SHIFT) | ipat;
}
-static int vmx_get_lpage_level(void)
-{
-	if (enable_ept && !cpu_has_vmx_ept_1g_page())
-		return PT_DIRECTORY_LEVEL;
-	else
-		/* For shadow and EPT supported 1GB page */
-		return PT_PDPE_LEVEL;
-}
-
static void vmcs_set_secondary_exec_control(struct vcpu_vmx *vmx)
{
	/*
{
	unsigned long host_bndcfgs;
	struct desc_ptr dt;
-	int r, i;
+	int r, i, ept_lpage_level;
	rdmsrl_safe(MSR_EFER, &host_efer);
	if (enable_ept)
		vmx_enable_tdp();
-	kvm_configure_mmu(enable_ept);
+
+	if (!enable_ept)
+		ept_lpage_level = 0;
+	else if (cpu_has_vmx_ept_1g_page())
+		ept_lpage_level = PT_PDPE_LEVEL;
+	else if (cpu_has_vmx_ept_2m_page())
+		ept_lpage_level = PT_DIRECTORY_LEVEL;
+	else
+		ept_lpage_level = PT_PAGE_TABLE_LEVEL;
+	kvm_configure_mmu(enable_ept, ept_lpage_level);
	/*
	 * Only enable PML when hardware supports PML feature, and both EPT
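For illustration, a standalone model (not kernel code) of the probe above, which replaces the removed vmx_get_lpage_level(): unlike the old two-way helper, the new chain can also report PT_PAGE_TABLE_LEVEL for hardware lacking 2MB EPT pages, and the 0 passed when EPT is disabled is harmless because kvm_configure_mmu() ignores its second argument whenever tdp_enabled is false. has_1g and has_2m stand in for cpu_has_vmx_ept_1g_page() and cpu_has_vmx_ept_2m_page():

/*
 * Standalone sketch, not kernel code: has_1g/has_2m stand in for the
 * cpu_has_vmx_ept_*_page() capability checks. A result of 0 is only
 * produced when EPT is off, and kvm_configure_mmu() never reads it in
 * that case; it falls back to the X86_FEATURE_GBPAGES-based defaults.
 */
#include <stdbool.h>
#include <stdio.h>

#define PT_PAGE_TABLE_LEVEL	1	/* 4KB */
#define PT_DIRECTORY_LEVEL	2	/* 2MB */
#define PT_PDPE_LEVEL		3	/* 1GB */

static int probe_ept_lpage_level(bool enable_ept, bool has_1g, bool has_2m)
{
	if (!enable_ept)
		return 0;	/* ignored by kvm_configure_mmu() */
	if (has_1g)
		return PT_PDPE_LEVEL;
	if (has_2m)
		return PT_DIRECTORY_LEVEL;
	return PT_PAGE_TABLE_LEVEL;	/* inexpressible via the old helper */
}

int main(void)
{
	printf("ept, 1g+2m:   %d\n", probe_ept_lpage_level(true, true, true));
	printf("ept, 2m only: %d\n", probe_ept_lpage_level(true, false, true));
	printf("ept, neither: %d\n", probe_ept_lpage_level(true, false, false));
	printf("no ept:       %d\n", probe_ept_lpage_level(false, false, false));
	return 0;
}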
	.get_exit_info = vmx_get_exit_info,
-	.get_lpage_level = vmx_get_lpage_level,
-
	.cpuid_update = vmx_cpuid_update,
	.set_supported_cpuid = vmx_set_supported_cpuid,