Re: [PATCH v1 06/11] mm: support GUP-triggered unsharing via FAULT_FLAG_UNSHARE (!hugetlb)

From: Matthew Wilcox
Date: Mon Dec 20 2021 - 13:53:09 EST


On Mon, Dec 20, 2021 at 06:37:30PM +0000, Matthew Wilcox wrote:
> +++ b/mm/memory.c
> @@ -3626,7 +3626,7 @@ vm_fault_t do_swap_page(struct vm_fault *vmf)
> inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
> dec_mm_counter_fast(vma->vm_mm, MM_SWAPENTS);
> pte = mk_pte(page, vma->vm_page_prot);
> - if ((vmf->flags & FAULT_FLAG_WRITE) && reuse_swap_page(page, NULL)) {
> + if ((vmf->flags & FAULT_FLAG_WRITE) && reuse_swap_page(page)) {
> pte = maybe_mkwrite(pte_mkdirty(pte), vma);
> vmf->flags &= ~FAULT_FLAG_WRITE;
> ret |= VM_FAULT_WRITE;
[...]
> @@ -1673,17 +1665,14 @@ static int page_trans_huge_map_swapcount(struct page *page, int *total_mapcount,
> * reuse_swap_page() returns false, but it may be always overwritten
> * (see the other implementation for CONFIG_SWAP=n).
> */
> -bool reuse_swap_page(struct page *page, int *total_map_swapcount)
> +bool reuse_swap_page(struct page *page)
> {
> - int count, total_mapcount, total_swapcount;
> + int count, total_swapcount;
>
> VM_BUG_ON_PAGE(!PageLocked(page), page);
> if (unlikely(PageKsm(page)))
> return false;
> - count = page_trans_huge_map_swapcount(page, &total_mapcount,
> - &total_swapcount);
> - if (total_map_swapcount)
> - *total_map_swapcount = total_mapcount + total_swapcount;
> + count = page_trans_huge_map_swapcount(page, &total_swapcount);
> if (count == 1 && PageSwapCache(page) &&
> (likely(!PageTransCompound(page)) ||
> /* The remaining swap count will be freed soon */

This makes me wonder whether reuse_swap_page() could also be based on
the refcount instead of the mapcount?