Re: [PATCH v4 15/26] mm/hugetlb: Drop __unmap_hugepage_range definition from hugetlb.h

From: kernel test robot
Date: Thu Jul 15 2021 - 01:52:46 EST


Hi Peter,

Thank you for the patch! Perhaps something to improve:

[auto build test WARNING on linus/master]
[also build test WARNING on v5.14-rc1 next-20210714]
[cannot apply to hnaz-linux-mm/master asm-generic/master linux/master]
[If your patch is applied to the wrong git tree, kindly drop us a note.
And when submitting patches, we suggest using '--base' as documented in
https://git-scm.com/docs/git-format-patch]

url: https://github.com/0day-ci/linux/commits/Peter-Xu/userfaultfd-wp-Support-shmem-and-hugetlbfs/20210715-062718
base: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git 8096acd7442e613fad0354fc8dfdb2003cceea0b
config: powerpc64-randconfig-r032-20210714 (attached as .config)
compiler: powerpc64-linux-gcc (GCC) 9.3.0
reproduce (this is a W=1 build):
        wget https://raw.githubusercontent.com/intel/lkp-tests/master/sbin/make.cross -O ~/bin/make.cross
        chmod +x ~/bin/make.cross
        # https://github.com/0day-ci/linux/commit/f8dd355edbfe948f84c8aaa10a5173656aa2778c
        git remote add linux-review https://github.com/0day-ci/linux
        git fetch --no-tags linux-review Peter-Xu/userfaultfd-wp-Support-shmem-and-hugetlbfs/20210715-062718
        git checkout f8dd355edbfe948f84c8aaa10a5173656aa2778c
        # save the attached .config to linux build tree
        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-9.3.0 make.cross ARCH=powerpc64
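
Once the tree is checked out and the attached .config is in place, a
single-object build should be enough to retrigger just this warning
without a full kernel build, e.g. (assuming make.cross forwards extra
arguments to make, as the examples above suggest):

        COMPILER_INSTALL_PATH=$HOME/0day COMPILER=gcc-9.3.0 make.cross ARCH=powerpc64 W=1 mm/hugetlb.o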

If you fix the issue, kindly add the following tag as appropriate:
Reported-by: kernel test robot <lkp@xxxxxxxxx>

All warnings (new ones prefixed by >>):

>> mm/hugetlb.c:4334:6: warning: no previous prototype for '__unmap_hugepage_range' [-Wmissing-prototypes]
    4334 | void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
         |      ^~~~~~~~~~~~~~~~~~~~~

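For context: -Wmissing-prototypes (one of the extra checks enabled by
W=1) fires whenever a function with external linkage is defined without
a prototype previously in scope. This patch drops the
__unmap_hugepage_range() declaration from hugetlb.h, but the definition
at mm/hugetlb.c:4334 keeps external linkage, so GCC no longer sees any
prior prototype. A minimal sketch of the mechanism, using hypothetical
names rather than the actual kernel sources:

        /* foo.c -- compile with: gcc -c -Wmissing-prototypes foo.c */

        /* Warns: external linkage, no previous prototype in scope. */
        void foo(void)
        {
        }

        /* Quiet: a prototype precedes the definition (normally it
         * would live in a shared header such as foo.h).
         */
        void bar(void);
        void bar(void)
        {
        }

        /* Quiet: -Wmissing-prototypes does not apply to static
         * functions.
         */
        static void baz(void)
        {
        }

Which of the two quiet variants fits here depends on whether the rest of
the series leaves any callers of __unmap_hugepage_range() outside
mm/hugetlb.c.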

vim +/__unmap_hugepage_range +4334 mm/hugetlb.c

63551ae0feaaa2 David Gibson 2005-06-21 4333 
24669e58477e27 Aneesh Kumar K.V 2012-07-31 @4334 void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
24669e58477e27 Aneesh Kumar K.V 2012-07-31 4335                             unsigned long start, unsigned long end,
24669e58477e27 Aneesh Kumar K.V 2012-07-31 4336                             struct page *ref_page)
63551ae0feaaa2 David Gibson 2005-06-21 4337 {
63551ae0feaaa2 David Gibson 2005-06-21 4338         struct mm_struct *mm = vma->vm_mm;
63551ae0feaaa2 David Gibson 2005-06-21 4339         unsigned long address;
c7546f8f03f5a4 David Gibson 2005-08-05 4340         pte_t *ptep;
63551ae0feaaa2 David Gibson 2005-06-21 4341         pte_t pte;
cb900f41215447 Kirill A. Shutemov 2013-11-14 4342         spinlock_t *ptl;
63551ae0feaaa2 David Gibson 2005-06-21 4343         struct page *page;
a5516438959d90 Andi Kleen 2008-07-23 4344         struct hstate *h = hstate_vma(vma);
a5516438959d90 Andi Kleen 2008-07-23 4345         unsigned long sz = huge_page_size(h);
ac46d4f3c43241 Jérôme Glisse 2018-12-28 4346         struct mmu_notifier_range range;
a5516438959d90 Andi Kleen 2008-07-23 4347 
63551ae0feaaa2 David Gibson 2005-06-21 4348         WARN_ON(!is_vm_hugetlb_page(vma));
a5516438959d90 Andi Kleen 2008-07-23 4349         BUG_ON(start & ~huge_page_mask(h));
a5516438959d90 Andi Kleen 2008-07-23 4350         BUG_ON(end & ~huge_page_mask(h));
63551ae0feaaa2 David Gibson 2005-06-21 4351 
07e326610e5634 Aneesh Kumar K.V 2016-12-12 4352         /*
07e326610e5634 Aneesh Kumar K.V 2016-12-12 4353          * This is a hugetlb vma, all the pte entries should point
07e326610e5634 Aneesh Kumar K.V 2016-12-12 4354          * to huge page.
07e326610e5634 Aneesh Kumar K.V 2016-12-12 4355          */
ed6a79352cad00 Peter Zijlstra 2018-08-31 4356         tlb_change_page_size(tlb, sz);
24669e58477e27 Aneesh Kumar K.V 2012-07-31 4357         tlb_start_vma(tlb, vma);
dff11abe280b47 Mike Kravetz 2018-10-05 4358 
dff11abe280b47 Mike Kravetz 2018-10-05 4359         /*
dff11abe280b47 Mike Kravetz 2018-10-05 4360          * If sharing possible, alert mmu notifiers of worst case.
dff11abe280b47 Mike Kravetz 2018-10-05 4361          */
6f4f13e8d9e27c Jérôme Glisse 2019-05-13 4362         mmu_notifier_range_init(&range, MMU_NOTIFY_UNMAP, 0, vma, mm, start,
6f4f13e8d9e27c Jérôme Glisse 2019-05-13 4363                                 end);
ac46d4f3c43241 Jérôme Glisse 2018-12-28 4364         adjust_range_if_pmd_sharing_possible(vma, &range.start, &range.end);
ac46d4f3c43241 Jérôme Glisse 2018-12-28 4365         mmu_notifier_invalidate_range_start(&range);
569f48b85813f0 Hillf Danton 2014-12-10 4366         address = start;
569f48b85813f0 Hillf Danton 2014-12-10 4367         for (; address < end; address += sz) {
7868a2087ec13e Punit Agrawal 2017-07-06 4368                 ptep = huge_pte_offset(mm, address, sz);
c7546f8f03f5a4 David Gibson 2005-08-05 4369                 if (!ptep)
c7546f8f03f5a4 David Gibson 2005-08-05 4370                         continue;
c7546f8f03f5a4 David Gibson 2005-08-05 4371 
cb900f41215447 Kirill A. Shutemov 2013-11-14 4372                 ptl = huge_pte_lock(h, mm, ptep);
34ae204f18519f Mike Kravetz 2020-08-11 4373                 if (huge_pmd_unshare(mm, vma, &address, ptep)) {
31d49da5ad0172 Aneesh Kumar K.V 2016-07-26 4374                         spin_unlock(ptl);
dff11abe280b47 Mike Kravetz 2018-10-05 4375                         /*
dff11abe280b47 Mike Kravetz 2018-10-05 4376                          * We just unmapped a page of PMDs by clearing a PUD.
dff11abe280b47 Mike Kravetz 2018-10-05 4377                          * The caller's TLB flush range should cover this area.
dff11abe280b47 Mike Kravetz 2018-10-05 4378                          */
31d49da5ad0172 Aneesh Kumar K.V 2016-07-26 4379                         continue;
31d49da5ad0172 Aneesh Kumar K.V 2016-07-26 4380                 }
39dde65c9940c9 Kenneth W Chen 2006-12-06 4381 
6629326b89b6e6 Hillf Danton 2012-03-23 4382                 pte = huge_ptep_get(ptep);
31d49da5ad0172 Aneesh Kumar K.V 2016-07-26 4383                 if (huge_pte_none(pte)) {
31d49da5ad0172 Aneesh Kumar K.V 2016-07-26 4384                         spin_unlock(ptl);
31d49da5ad0172 Aneesh Kumar K.V 2016-07-26 4385                         continue;
31d49da5ad0172 Aneesh Kumar K.V 2016-07-26 4386                 }
6629326b89b6e6 Hillf Danton 2012-03-23 4387 
6629326b89b6e6 Hillf Danton 2012-03-23 4388                 /*
9fbc1f635fd0bd Naoya Horiguchi 2015-02-11 4389                  * Migrating hugepage or HWPoisoned hugepage is already
9fbc1f635fd0bd Naoya Horiguchi 2015-02-11 4390                  * unmapped and its refcount is dropped, so just clear pte here.
6629326b89b6e6 Hillf Danton 2012-03-23 4391                  */
9fbc1f635fd0bd Naoya Horiguchi 2015-02-11 4392                 if (unlikely(!pte_present(pte))) {
9386fac34c7cbe Punit Agrawal 2017-07-06 4393                         huge_pte_clear(mm, address, ptep, sz);
31d49da5ad0172 Aneesh Kumar K.V 2016-07-26 4394                         spin_unlock(ptl);
31d49da5ad0172 Aneesh Kumar K.V 2016-07-26 4395                         continue;
8c4894c6bc790d Naoya Horiguchi 2012-12-12 4396                 }
6629326b89b6e6 Hillf Danton 2012-03-23 4397 
6629326b89b6e6 Hillf Danton 2012-03-23 4398                 page = pte_page(pte);
04f2cbe35699d2 Mel Gorman 2008-07-23 4399                 /*
04f2cbe35699d2 Mel Gorman 2008-07-23 4400                  * If a reference page is supplied, it is because a specific
04f2cbe35699d2 Mel Gorman 2008-07-23 4401                  * page is being unmapped, not a range. Ensure the page we
04f2cbe35699d2 Mel Gorman 2008-07-23 4402                  * are about to unmap is the actual page of interest.
04f2cbe35699d2 Mel Gorman 2008-07-23 4403                  */
04f2cbe35699d2 Mel Gorman 2008-07-23 4404                 if (ref_page) {
31d49da5ad0172 Aneesh Kumar K.V 2016-07-26 4405                         if (page != ref_page) {
31d49da5ad0172 Aneesh Kumar K.V 2016-07-26 4406                                 spin_unlock(ptl);
31d49da5ad0172 Aneesh Kumar K.V 2016-07-26 4407                                 continue;
31d49da5ad0172 Aneesh Kumar K.V 2016-07-26 4408                         }
04f2cbe35699d2 Mel Gorman 2008-07-23 4409                         /*
04f2cbe35699d2 Mel Gorman 2008-07-23 4410                          * Mark the VMA as having unmapped its page so that
04f2cbe35699d2 Mel Gorman 2008-07-23 4411                          * future faults in this VMA will fail rather than
04f2cbe35699d2 Mel Gorman 2008-07-23 4412                          * looking like data was lost
04f2cbe35699d2 Mel Gorman 2008-07-23 4413                          */
04f2cbe35699d2 Mel Gorman 2008-07-23 4414                         set_vma_resv_flags(vma, HPAGE_RESV_UNMAPPED);
04f2cbe35699d2 Mel Gorman 2008-07-23 4415                 }
04f2cbe35699d2 Mel Gorman 2008-07-23 4416 
c7546f8f03f5a4 David Gibson 2005-08-05 4417                 pte = huge_ptep_get_and_clear(mm, address, ptep);
b528e4b6405b9f Aneesh Kumar K.V 2016-12-12 4418                 tlb_remove_huge_tlb_entry(h, tlb, ptep, address);
106c992a5ebef2 Gerald Schaefer 2013-04-29 4419                 if (huge_pte_dirty(pte))
6649a3863232eb Ken Chen 2007-02-08 4420                         set_page_dirty(page);
9e81130b7ce230 Hillf Danton 2012-03-21 4421 
5d317b2b653659 Naoya Horiguchi 2015-11-05 4422                 hugetlb_count_sub(pages_per_huge_page(h), mm);
d281ee61451835 Kirill A. Shutemov 2016-01-15 4423                 page_remove_rmap(page, true);
31d49da5ad0172 Aneesh Kumar K.V 2016-07-26 4424 
cb900f41215447 Kirill A. Shutemov 2013-11-14 4425                 spin_unlock(ptl);
e77b0852b551ff Aneesh Kumar K.V 2016-07-26 4426                 tlb_remove_page_size(tlb, page, huge_page_size(h));
24669e58477e27 Aneesh Kumar K.V 2012-07-31 4427                 /*
31d49da5ad0172 Aneesh Kumar K.V 2016-07-26 4428                  * Bail out after unmapping reference page if supplied
24669e58477e27 Aneesh Kumar K.V 2012-07-31 4429                  */
31d49da5ad0172 Aneesh Kumar K.V 2016-07-26 4430                 if (ref_page)
31d49da5ad0172 Aneesh Kumar K.V 2016-07-26 4431                         break;
fe1668ae5bf014 Kenneth W Chen 2006-10-04 4432         }
ac46d4f3c43241 Jérôme Glisse 2018-12-28 4433         mmu_notifier_invalidate_range_end(&range);
24669e58477e27 Aneesh Kumar K.V 2012-07-31 4434         tlb_end_vma(tlb, vma);
^1da177e4c3f41 Linus Torvalds 2005-04-16 4435 }
63551ae0feaaa2 David Gibson 2005-06-21 4436 
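
The warning is consistent with the patch's stated goal: the declaration
is gone from hugetlb.h, but the definition above still has external
linkage. Assuming the rest of the series leaves no callers of
__unmap_hugepage_range() outside mm/hugetlb.c (not verified here), one
possible resolution is to give the definition internal linkage; a
sketch in diff form, not necessarily what the series intends:

--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -4334,3 +4334,3 @@
-void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
+static void __unmap_hugepage_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 			    unsigned long start, unsigned long end,
 			    struct page *ref_page)

The alternative, if out-of-file callers remain, is to keep a prototype
visible to mm/hugetlb.c, for example in mm/internal.h rather than the
public header.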

---
0-DAY CI Kernel Test Service, Intel Corporation
https://lists.01.org/hyperkitty/list/kbuild-all@xxxxxxxxxxxx

Attachment: .config.gz
Description: application/gzip