From d8f4cda748eaf1edc2c31275c854277a2aaaa4cf Mon Sep 17 00:00:00 2001
From: Sean Christopherson
Date: Thu, 10 Oct 2024 11:24:11 -0700
Subject: KVM: MIPS: Mark "struct page" pfns dirty only in "slow" page fault path

Mark pages/folios dirty only in the slow page fault path, i.e. only when
mmu_lock is held and the operation is mmu_notifier-protected, as marking a
page/folio dirty after it has been written back can make some filesystems
unhappy (backing KVM guests with such filesystem files is uncommon, and
the race is minuscule, hence the lack of complaints).  See the link below
for details.

Link: https://lore.kernel.org/all/cover.1683044162.git.lstoakes@gmail.com
Signed-off-by: Sean Christopherson
Tested-by: Dmitry Osipenko
Signed-off-by: Paolo Bonzini
Message-ID: <20241010182427.1434605-70-seanjc@google.com>
---
 arch/mips/kvm/mmu.c | 5 +++--
 1 file changed, 3 insertions(+), 2 deletions(-)

(limited to 'arch/mips/kvm/mmu.c')

diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index c17157e700c0..4da9ce4eb54d 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -514,7 +514,6 @@ static int _kvm_mips_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa,
 		set_pte(ptep, pte_mkdirty(*ptep));
 		pfn = pte_pfn(*ptep);
 		mark_page_dirty(kvm, gfn);
-		kvm_set_pfn_dirty(pfn);
 	}
 
 	if (out_entry)
@@ -628,7 +627,6 @@ retry:
 		if (write_fault) {
 			prot_bits |= __WRITEABLE;
 			mark_page_dirty(kvm, gfn);
-			kvm_set_pfn_dirty(pfn);
 		}
 	}
 	entry = pfn_pte(pfn, __pgprot(prot_bits));
@@ -642,6 +640,9 @@ retry:
 	if (out_buddy)
 		*out_buddy = *ptep_buddy(ptep);
 
+	if (writeable)
+		kvm_set_pfn_dirty(pfn);
+
 	spin_unlock(&kvm->mmu_lock);
 	kvm_release_pfn_clean(pfn);
 	kvm_set_pfn_accessed(pfn);
--
cgit v1.2.3

From 4d75f14fc869d8609fb1ac90085b3450898b83f5 Mon Sep 17 00:00:00 2001
From: Sean Christopherson
Date: Thu, 10 Oct 2024 11:24:12 -0700
Subject: KVM: MIPS: Mark "struct page" pfns accessed only in "slow" page fault path

Mark pages accessed only in the slow page fault path in order to remove an
unnecessary user of kvm_pfn_to_refcounted_page().

Marking pages accessed in the primary MMU during KVM page fault handling
isn't harmful, but it's largely pointless and likely a waste of cycles
since the primary MMU will call into KVM via mmu_notifiers when aging
pages.  I.e. KVM participates in a "pull" model, so there's no need to
also "push" updates.
Signed-off-by: Sean Christopherson
Tested-by: Dmitry Osipenko
Signed-off-by: Paolo Bonzini
Message-ID: <20241010182427.1434605-71-seanjc@google.com>
---
 arch/mips/kvm/mmu.c | 12 ++----------
 1 file changed, 2 insertions(+), 10 deletions(-)

(limited to 'arch/mips/kvm/mmu.c')

diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index 4da9ce4eb54d..f1e4b618ec6d 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -484,8 +484,6 @@ static int _kvm_mips_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa,
 	struct kvm *kvm = vcpu->kvm;
 	gfn_t gfn = gpa >> PAGE_SHIFT;
 	pte_t *ptep;
-	kvm_pfn_t pfn = 0;	/* silence bogus GCC warning */
-	bool pfn_valid = false;
 	int ret = 0;
 
 	spin_lock(&kvm->mmu_lock);
@@ -498,12 +496,9 @@ static int _kvm_mips_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa,
 	}
 
 	/* Track access to pages marked old */
-	if (!pte_young(*ptep)) {
+	if (!pte_young(*ptep))
 		set_pte(ptep, pte_mkyoung(*ptep));
-		pfn = pte_pfn(*ptep);
-		pfn_valid = true;
-		/* call kvm_set_pfn_accessed() after unlock */
-	}
+
 	if (write_fault && !pte_dirty(*ptep)) {
 		if (!pte_write(*ptep)) {
 			ret = -EFAULT;
@@ -512,7 +507,6 @@ static int _kvm_mips_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa,
 
 		/* Track dirtying of writeable pages */
 		set_pte(ptep, pte_mkdirty(*ptep));
-		pfn = pte_pfn(*ptep);
 		mark_page_dirty(kvm, gfn);
 	}
 
@@ -523,8 +517,6 @@ static int _kvm_mips_map_page_fast(struct kvm_vcpu *vcpu, unsigned long gpa,
 
 out:
 	spin_unlock(&kvm->mmu_lock);
-	if (pfn_valid)
-		kvm_set_pfn_accessed(pfn);
 	return ret;
 }
 
--
cgit v1.2.3

From 13d66fddaaa40c82a664cbec0ac9d31b7771a396 Mon Sep 17 00:00:00 2001
From: Sean Christopherson
Date: Thu, 10 Oct 2024 11:24:13 -0700
Subject: KVM: MIPS: Mark "struct page" pfns accessed prior to dropping mmu_lock

Mark pages accessed before dropping mmu_lock when faulting in guest memory
so that MIPS can convert to kvm_release_faultin_page() without tripping
its lockdep assertion on mmu_lock being held.

Signed-off-by: Sean Christopherson
Tested-by: Dmitry Osipenko
Signed-off-by: Paolo Bonzini
Message-ID: <20241010182427.1434605-72-seanjc@google.com>
---
 arch/mips/kvm/mmu.c | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

(limited to 'arch/mips/kvm/mmu.c')

diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index f1e4b618ec6d..69463ab24d97 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -634,10 +634,9 @@ retry:
 	if (writeable)
 		kvm_set_pfn_dirty(pfn);
 
+	kvm_release_pfn_clean(pfn);
 	spin_unlock(&kvm->mmu_lock);
-	kvm_release_pfn_clean(pfn);
-	kvm_set_pfn_accessed(pfn);
 
 out:
 	srcu_read_unlock(&kvm->srcu, srcu_idx);
 	return err;
--
cgit v1.2.3

From 7e8f1aa59d0b6c6ac2d539008d452357c52773c3 Mon Sep 17 00:00:00 2001
From: Sean Christopherson
Date: Thu, 10 Oct 2024 11:24:14 -0700
Subject: KVM: MIPS: Use kvm_faultin_pfn() to map pfns into the guest

Convert MIPS to kvm_faultin_pfn()+kvm_release_faultin_page(), which are
new APIs to consolidate arch code and provide consistent behavior across
all KVM architectures.
Signed-off-by: Sean Christopherson
Tested-by: Dmitry Osipenko
Signed-off-by: Paolo Bonzini
Message-ID: <20241010182427.1434605-73-seanjc@google.com>
---
 arch/mips/kvm/mmu.c | 14 ++++++--------
 1 file changed, 6 insertions(+), 8 deletions(-)

(limited to 'arch/mips/kvm/mmu.c')

diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
index 69463ab24d97..d2c3b6b41f18 100644
--- a/arch/mips/kvm/mmu.c
+++ b/arch/mips/kvm/mmu.c
@@ -557,6 +557,7 @@ static int kvm_mips_map_page(struct kvm_vcpu *vcpu, unsigned long gpa,
 	bool writeable;
 	unsigned long prot_bits;
 	unsigned long mmu_seq;
+	struct page *page;
 
 	/* Try the fast path to handle old / clean pages */
 	srcu_idx = srcu_read_lock(&kvm->srcu);
@@ -578,7 +579,7 @@ retry:
 	mmu_seq = kvm->mmu_invalidate_seq;
 	/*
 	 * Ensure the read of mmu_invalidate_seq isn't reordered with PTE reads
-	 * in gfn_to_pfn_prot() (which calls get_user_pages()), so that we don't
+	 * in kvm_faultin_pfn() (which calls get_user_pages()), so that we don't
 	 * risk the page we get a reference to getting unmapped before we have a
 	 * chance to grab the mmu_lock without mmu_invalidate_retry() noticing.
 	 *
@@ -590,7 +591,7 @@ retry:
 	smp_rmb();
 	/* Slow path - ask KVM core whether we can access this GPA */
-	pfn = gfn_to_pfn_prot(kvm, gfn, write_fault, &writeable);
+	pfn = kvm_faultin_pfn(vcpu, gfn, write_fault, &writeable, &page);
 	if (is_error_noslot_pfn(pfn)) {
 		err = -EFAULT;
 		goto out;
 	}
@@ -602,10 +603,10 @@ retry:
 		/*
 		 * This can happen when mappings are changed asynchronously, but
 		 * also synchronously if a COW is triggered by
-		 * gfn_to_pfn_prot().
+		 * kvm_faultin_pfn().
		 */
 		spin_unlock(&kvm->mmu_lock);
-		kvm_release_pfn_clean(pfn);
+		kvm_release_page_unused(page);
 		goto retry;
 	}
 
@@ -632,10 +633,7 @@ retry:
 	if (out_buddy)
 		*out_buddy = *ptep_buddy(ptep);
 
-	if (writeable)
-		kvm_set_pfn_dirty(pfn);
-	kvm_release_pfn_clean(pfn);
-
+	kvm_release_faultin_page(kvm, page, false, writeable);
 	spin_unlock(&kvm->mmu_lock);
 out:
 	srcu_read_unlock(&kvm->srcu, srcu_idx);
--
cgit v1.2.3
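
Taken together, the four patches above leave the slow path of kvm_mips_map_page()
with the ordering sketched below. This is an illustrative condensation, not the
verbatim contents of arch/mips/kvm/mmu.c at d2c3b6b41f18: PTE construction and
most error handling are elided, and the wrapper name kvm_mips_map_page_sketch()
is invented for the example. The helpers it calls (kvm_faultin_pfn(),
kvm_release_page_unused(), kvm_release_faultin_page(), mmu_invalidate_retry())
are the ones introduced or referenced by the diffs above.

static int kvm_mips_map_page_sketch(struct kvm_vcpu *vcpu, unsigned long gpa,
				    bool write_fault)
{
	struct kvm *kvm = vcpu->kvm;
	gfn_t gfn = gpa >> PAGE_SHIFT;
	unsigned long mmu_seq;
	struct page *page;
	bool writeable;
	kvm_pfn_t pfn;

retry:
	mmu_seq = kvm->mmu_invalidate_seq;
	smp_rmb();	/* order the seq read before the PTE reads in kvm_faultin_pfn() */

	/* Fault the page in via the primary MMU (replaces gfn_to_pfn_prot()). */
	pfn = kvm_faultin_pfn(vcpu, gfn, write_fault, &writeable, &page);
	if (is_error_noslot_pfn(pfn))
		return -EFAULT;

	spin_lock(&kvm->mmu_lock);
	if (mmu_invalidate_retry(kvm, mmu_seq)) {
		/* Raced with an invalidation; drop the unused page and retry. */
		spin_unlock(&kvm->mmu_lock);
		kvm_release_page_unused(page);
		goto retry;
	}

	/* ... build and install the PTE; mark_page_dirty(kvm, gfn) on write faults ... */

	/*
	 * Dirty state and the page reference are handed back while mmu_lock is
	 * still held, matching the lockdep assertion in kvm_release_faultin_page().
	 */
	kvm_release_faultin_page(kvm, page, false, writeable);
	spin_unlock(&kvm->mmu_lock);

	return 0;
}

The point of the ordering is that the page reference is dropped, and dirty/accessed
state propagated, before mmu_lock is released, so dirtying can no longer race with
writeback and the release helper's locking expectations are satisfied.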