Merge branch 'kvm-arm64/pt-new' into kvmarm-master/next
author	Marc Zyngier <maz@kernel.org>
	Fri, 11 Sep 2020 14:54:30 +0000 (15:54 +0100)
committer	Marc Zyngier <maz@kernel.org>
	Fri, 11 Sep 2020 14:54:30 +0000 (15:54 +0100)
Signed-off-by: Marc Zyngier <maz@kernel.org>
arch/arm64/include/asm/kvm_host.h
arch/arm64/kvm/arm.c
arch/arm64/kvm/mmu.c
arch/x86/kvm/x86.c

Simple merge (arch/arm64/include/asm/kvm_host.h)
Simple merge (arch/arm64/kvm/arm.c)
@@@ -1871,25 -785,26 +785,27 @@@ static int user_mem_abort(struct kvm_vc
            !fault_supports_stage2_huge_mapping(memslot, hva, vma_pagesize)) {
                force_pte = true;
                vma_pagesize = PAGE_SIZE;
 +              vma_shift = PAGE_SHIFT;
        }
  
-       /*
-        * The stage2 has a minimum of 2 level table (For arm64 see
-        * kvm_arm_setup_stage2()). Hence, we are guaranteed that we can
-        * use PMD_SIZE huge mappings (even when the PMD is folded into PGD).
-        * As for PUD huge maps, we must make sure that we have at least
-        * 3 levels, i.e, PMD is not folded.
-        */
-       if (vma_pagesize == PMD_SIZE ||
-           (vma_pagesize == PUD_SIZE && kvm_stage2_has_pmd(kvm)))
-               gfn = (fault_ipa & huge_page_mask(hstate_vma(vma))) >> PAGE_SHIFT;
+       if (vma_pagesize == PMD_SIZE || vma_pagesize == PUD_SIZE)
+               fault_ipa &= huge_page_mask(hstate_vma(vma));
+       gfn = fault_ipa >> PAGE_SHIFT;
        mmap_read_unlock(current->mm);
  
-       /* We need minimum second+third level pages */
-       ret = kvm_mmu_topup_memory_cache(memcache, kvm_mmu_cache_min_pages(kvm));
-       if (ret)
-               return ret;
+       /*
+        * Permission faults just need to update the existing leaf entry,
+        * and so normally don't require allocations from the memcache. The
+        * only exception to this is when dirty logging is enabled at runtime
+        * and a write fault needs to collapse a block entry into a table.
+        */
+       if (fault_status != FSC_PERM || (logging_active && write_fault)) {
+               ret = kvm_mmu_topup_memory_cache(memcache,
+                                                kvm_mmu_cache_min_pages(kvm));
+               if (ret)
+                       return ret;
+       }
  
        mmu_seq = vcpu->kvm->mmu_notifier_seq;
        /*
Simple merge (arch/x86/kvm/x86.c)
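
For readers following the conflict resolution in the mmu.c hunk above: the merged code drops the old kvm_stage2_has_pmd() guard and instead unconditionally aligns fault_ipa down for PMD/PUD-sized mappings, then derives gfn from the aligned address. Below is a minimal standalone C sketch of that alignment step; the fixed 4K-page constants and the example address are illustrative stand-ins for the kernel's huge_page_mask(hstate_vma(vma)), not definitions from the commit.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12
#define PMD_SHIFT  21                      /* 2MiB blocks with 4K pages */
#define PMD_SIZE   (1ULL << PMD_SHIFT)
#define PMD_MASK   (~(PMD_SIZE - 1))

int main(void)
{
	/* Illustrative fault address inside a 2MiB region (not from the commit). */
	uint64_t fault_ipa = 0x40212345ULL;
	uint64_t vma_pagesize = PMD_SIZE;  /* pretend the VMA is hugetlb-backed */
	uint64_t gfn;

	/* Mirrors: if (vma_pagesize == PMD_SIZE || vma_pagesize == PUD_SIZE)
	 *                  fault_ipa &= huge_page_mask(hstate_vma(vma));       */
	if (vma_pagesize == PMD_SIZE)
		fault_ipa &= PMD_MASK;

	gfn = fault_ipa >> PAGE_SHIFT;
	printf("aligned fault_ipa = 0x%llx, gfn = 0x%llx\n",
	       (unsigned long long)fault_ipa, (unsigned long long)gfn);
	return 0;
}

Aligning fault_ipa itself, rather than only the derived gfn, keeps the IPA and gfn consistent for the rest of the fault path; that reading follows from the hunk itself, since both values are used after this point.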
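The second change in the hunk makes the memcache top-up conditional, per the comment in the diff: permission faults only update an existing leaf entry, except when dirty logging forces a write fault to collapse a block entry into a table. A standalone sketch of that decision follows; FSC_PERM's arm64 value (0x0c) is hard-coded here as an assumption rather than pulled from asm/kvm_arm.h, and 0x04 stands in for a translation-fault status code.

#include <stdbool.h>
#include <stdio.h>

/* arm64's stage-2 permission-fault status code; hard-coded here as an
 * assumption instead of including the kernel header that defines it. */
#define FSC_PERM 0x0c

/* Mirrors the merged condition guarding kvm_mmu_topup_memory_cache():
 * permission faults reuse the existing leaf entry, unless dirty logging
 * means a write fault must break a block mapping into a table. */
static bool needs_memcache_topup(unsigned int fault_status,
				 bool logging_active, bool write_fault)
{
	return fault_status != FSC_PERM || (logging_active && write_fault);
}

int main(void)
{
	/* Plain permission fault: no new table pages needed. */
	printf("%d\n", needs_memcache_topup(FSC_PERM, false, true));   /* 0 */
	/* Write fault with dirty logging: may need to split a block. */
	printf("%d\n", needs_memcache_topup(FSC_PERM, true, true));    /* 1 */
	/* Translation fault (0x04 as a stand-in): always top up first. */
	printf("%d\n", needs_memcache_topup(0x04, false, false));      /* 1 */
	return 0;
}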