mm/gup: refactor record_subpages() to find 1st small page

All the fast-gup functions take a tail page to operate on, and always need
to do page mask calculations before feeding it into record_subpages().

Merge that logic into record_subpages(), so that it will do the nth_page()
calculation.

Link: https://lkml.kernel.org/r/20240327152332.950956-8-peterx@redhat.com
Signed-off-by: Peter Xu <peterx@redhat.com>
Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Tested-by: Ryan Roberts <ryan.roberts@arm.com>
Cc: Andrea Arcangeli <aarcange@redhat.com>
Cc: Andrew Jones <andrew.jones@linux.dev>
Cc: Aneesh Kumar K.V (IBM) <aneesh.kumar@kernel.org>
Cc: Axel Rasmussen <axelrasmussen@google.com>
Cc: Christophe Leroy <christophe.leroy@csgroup.eu>
Cc: Christoph Hellwig <hch@infradead.org>
Cc: David Hildenbrand <david@redhat.com>
Cc: James Houghton <jthoughton@google.com>
Cc: John Hubbard <jhubbard@nvidia.com>
Cc: Kirill A. Shutemov <kirill@shutemov.name>
Cc: Lorenzo Stoakes <lstoakes@gmail.com>
Cc: Matthew Wilcox (Oracle) <willy@infradead.org>
Cc: Michael Ellerman <mpe@ellerman.id.au>
Cc: "Mike Rapoport (IBM)" <rppt@kernel.org>
Cc: Muchun Song <muchun.song@linux.dev>
Cc: Rik van Riel <riel@surriel.com>
Cc: Vlastimil Babka <vbabka@suse.cz>
Cc: Yang Shi <shy828301@gmail.com>
Signed-off-by: Andrew Morton <akpm@linux-foundation.org>
This commit is contained in:
Peter Xu 2024-03-27 11:23:26 -04:00 committed by Andrew Morton
parent 607c63195d
commit f3c94c625f

View File

@ -2789,13 +2789,16 @@ static int __gup_device_huge_pud(pud_t pud, pud_t *pudp, unsigned long addr,
} }
#endif #endif
static int record_subpages(struct page *page, unsigned long addr, static int record_subpages(struct page *page, unsigned long sz,
unsigned long end, struct page **pages) unsigned long addr, unsigned long end,
struct page **pages)
{ {
struct page *start_page;
int nr; int nr;
start_page = nth_page(page, (addr & (sz - 1)) >> PAGE_SHIFT);
for (nr = 0; addr != end; nr++, addr += PAGE_SIZE) for (nr = 0; addr != end; nr++, addr += PAGE_SIZE)
pages[nr] = nth_page(page, nr); pages[nr] = nth_page(start_page, nr);
return nr; return nr;
} }
@ -2830,8 +2833,8 @@ static int gup_hugepte(pte_t *ptep, unsigned long sz, unsigned long addr,
/* hugepages are never "special" */ /* hugepages are never "special" */
VM_BUG_ON(!pfn_valid(pte_pfn(pte))); VM_BUG_ON(!pfn_valid(pte_pfn(pte)));
page = nth_page(pte_page(pte), (addr & (sz - 1)) >> PAGE_SHIFT); page = pte_page(pte);
refs = record_subpages(page, addr, end, pages + *nr); refs = record_subpages(page, sz, addr, end, pages + *nr);
folio = try_grab_folio(page, refs, flags); folio = try_grab_folio(page, refs, flags);
if (!folio) if (!folio)
@ -2904,8 +2907,8 @@ static int gup_huge_pmd(pmd_t orig, pmd_t *pmdp, unsigned long addr,
pages, nr); pages, nr);
} }
page = nth_page(pmd_page(orig), (addr & ~PMD_MASK) >> PAGE_SHIFT); page = pmd_page(orig);
refs = record_subpages(page, addr, end, pages + *nr); refs = record_subpages(page, PMD_SIZE, addr, end, pages + *nr);
folio = try_grab_folio(page, refs, flags); folio = try_grab_folio(page, refs, flags);
if (!folio) if (!folio)
@ -2948,8 +2951,8 @@ static int gup_huge_pud(pud_t orig, pud_t *pudp, unsigned long addr,
pages, nr); pages, nr);
} }
page = nth_page(pud_page(orig), (addr & ~PUD_MASK) >> PAGE_SHIFT); page = pud_page(orig);
refs = record_subpages(page, addr, end, pages + *nr); refs = record_subpages(page, PUD_SIZE, addr, end, pages + *nr);
folio = try_grab_folio(page, refs, flags); folio = try_grab_folio(page, refs, flags);
if (!folio) if (!folio)
@ -2988,8 +2991,8 @@ static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr,
BUILD_BUG_ON(pgd_devmap(orig)); BUILD_BUG_ON(pgd_devmap(orig));
page = nth_page(pgd_page(orig), (addr & ~PGDIR_MASK) >> PAGE_SHIFT); page = pgd_page(orig);
refs = record_subpages(page, addr, end, pages + *nr); refs = record_subpages(page, PGDIR_SIZE, addr, end, pages + *nr);
folio = try_grab_folio(page, refs, flags); folio = try_grab_folio(page, refs, flags);
if (!folio) if (!folio)