Lines Matching full:page

10  * Provides methods for unmapping each kind of mapped page:
25 * page->flags PG_locked (lock_page) * (see hugetlbfs below)
28 * hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
50 * hugetlb_fault_mutex (hugetlbfs specific page fault mutex)
51 * page->flags PG_locked (lock_page)
274 * searches where page is mapped.
457 * Getting a lock on a stable anon_vma from a page off the LRU is tricky!
461 * have been relevant to this page.
463 * The page might have been remapped to a different anon_vma or the anon_vma
468 * ensure that any anon_vma obtained from the page will still be valid for as
472 * chain and verify that the page in question is indeed mapped in it
476 * page_remove_rmap() that the anon_vma pointer from page->mapping is valid
480 struct anon_vma *page_get_anon_vma(struct page *page) in page_get_anon_vma() argument
486 anon_mapping = (unsigned long)READ_ONCE(page->mapping); in page_get_anon_vma()
489 if (!page_mapped(page)) in page_get_anon_vma()
499 * If this page is still mapped, then its anon_vma cannot have been in page_get_anon_vma()
505 if (!page_mapped(page)) { in page_get_anon_vma()
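The matches above belong to the page_get_anon_vma() path. As a reading aid, here is a minimal sketch of the flow they come from, assuming the usual PAGE_MAPPING_ANON tagging of page->mapping and an RCU-protected, refcounted anon_vma (a reconstruction, not the exact source of this tree):

struct anon_vma *get_stable_anon_vma(struct page *page)
{
        struct anon_vma *anon_vma = NULL;
        unsigned long anon_mapping;

        rcu_read_lock();
        anon_mapping = (unsigned long)READ_ONCE(page->mapping);
        if ((anon_mapping & PAGE_MAPPING_FLAGS) != PAGE_MAPPING_ANON)
                goto out;
        if (!page_mapped(page))
                goto out;

        anon_vma = (struct anon_vma *)(anon_mapping - PAGE_MAPPING_ANON);
        if (!atomic_inc_not_zero(&anon_vma->refcount)) {
                anon_vma = NULL;        /* anon_vma is already on its way out */
                goto out;
        }

        /* recheck: if the page was unmapped meanwhile, the anon_vma may be stale */
        if (!page_mapped(page)) {
                rcu_read_unlock();
                put_anon_vma(anon_vma);
                return NULL;
        }
out:
        rcu_read_unlock();
        return anon_vma;
}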
524 struct anon_vma *page_lock_anon_vma_read(struct page *page, in page_lock_anon_vma_read() argument
533 anon_mapping = (unsigned long)READ_ONCE(page->mapping); in page_lock_anon_vma_read()
536 if (!page_mapped(page)) in page_lock_anon_vma_read()
543 * If the page is still mapped, then this anon_vma is still in page_lock_anon_vma_read()
547 if (!page_mapped(page)) { in page_lock_anon_vma_read()
553 trace_android_vh_do_page_trylock(page, NULL, NULL, &success); in page_lock_anon_vma_read()
571 if (!page_mapped(page)) { in page_lock_anon_vma_read()
608 * before any IO is initiated on the page to prevent lost writes. Similarly,
649 * before the page is queued for IO. in set_tlb_ubc_flush_pending()
678 * the page and flushing the page. If this race occurs, it potentially allows
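These batched-flush matches describe deferring the TLB flush instead of flushing per PTE during the rmap walk; the race they mention is the window between clearing a PTE and actually flushing. A sketch of the unmap-side pattern, assuming the should_defer_flush()/set_tlb_ubc_flush_pending() helpers of this kernel generation:

        if (should_defer_flush(mm, flags)) {
                /* clear the PTE now, record that this mm still owes a TLB flush */
                pteval = ptep_get_and_clear(mm, address, pvmw.pte);
                set_tlb_ubc_flush_pending(mm, pte_dirty(pteval));
        } else {
                /* no batching: flush as part of clearing the PTE */
                pteval = ptep_clear_flush(vma, address, pvmw.pte);
        }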
714 * At what user virtual address is page expected in vma?
715 * Caller should check the page is actually part of the vma.
717 unsigned long page_address_in_vma(struct page *page, struct vm_area_struct *vma) in page_address_in_vma() argument
719 if (PageAnon(page)) { in page_address_in_vma()
720 struct anon_vma *page__anon_vma = page_anon_vma(page); in page_address_in_vma()
730 } else if (vma->vm_file->f_mapping != compound_head(page)->mapping) { in page_address_in_vma()
734 return vma_address(page, vma); in page_address_in_vma()
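For the common linear-mapping case, vma_address() reduces to page-offset arithmetic; a sketch of the computation (ignoring the THP and out-of-range handling of the real helper):

static unsigned long linear_address_of(struct page *page, struct vm_area_struct *vma)
{
        pgoff_t pgoff = page_to_pgoff(page);

        /* translate the page's file/anon offset back into this VMA's range */
        return vma->vm_start + ((pgoff - vma->vm_pgoff) << PAGE_SHIFT);
}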
780 static bool page_referenced_one(struct page *page, struct vm_area_struct *vma, in page_referenced_one() argument
785 .page = page, in page_referenced_one()
801 trace_android_vh_look_around(&pvmw, page, vma, &referenced); in page_referenced_one()
807 * If the page has been used in another mapping, in page_referenced_one()
810 * PG_referenced or activated the page. in page_referenced_one()
820 /* unexpected pmd-mapped page? */ in page_referenced_one()
828 clear_page_idle(page); in page_referenced_one()
829 if (test_and_clear_page_young(page)) in page_referenced_one()
837 trace_android_vh_page_referenced_one_end(vma, page, referenced); in page_referenced_one()
856 * page_referenced - test if the page was referenced
857 * @page: the page to test
858 * @is_locked: caller holds lock on the page
860 * @vm_flags: collect the vma->vm_flags of mappings that actually referenced the page
862 * Quick test_and_clear_referenced for all mappings of a page,
864 * Return: The number of mappings which referenced the page. Return -1 if
867 int page_referenced(struct page *page, in page_referenced() argument
874 .mapcount = total_mapcount(page), in page_referenced()
888 if (!page_rmapping(page)) in page_referenced()
891 if (!is_locked && (!PageAnon(page) || PageKsm(page))) { in page_referenced()
892 we_locked = trylock_page(page); in page_referenced()
906 rmap_walk(page, &rwc); in page_referenced()
910 unlock_page(page); in page_referenced()
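Reclaim is the main consumer of page_referenced(); the caller side looks roughly like the sketch below. It is modeled on the mainline page_check_references() logic and deliberately simplified; sc->target_mem_cgroup and the PAGEREF_* codes are assumptions borrowed from vmscan, not shown in these matches.

        unsigned long vm_flags;
        int referenced;

        /* count (and clear) the accessed bits across every mapping of the page */
        referenced = page_referenced(page, 1, sc->target_mem_cgroup, &vm_flags);

        if (vm_flags & VM_LOCKED)
                return PAGEREF_RECLAIM;         /* an mlocked VMA maps it */
        if (referenced)
                return PAGEREF_ACTIVATE;        /* simplified: recently used, keep it */
        return PAGEREF_RECLAIM;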
915 static bool page_mkclean_one(struct page *page, struct vm_area_struct *vma, in page_mkclean_one() argument
919 .page = page, in page_mkclean_one()
929 * the page cannot be freed from this function. in page_mkclean_one()
933 vma_address_end(page, vma)); in page_mkclean_one()
961 flush_cache_page(vma, address, page_to_pfn(page)); in page_mkclean_one()
968 /* unexpected pmd-mapped page? */ in page_mkclean_one()
975 * downgrading page table protection not changing it to point in page_mkclean_one()
976 * to a new page. in page_mkclean_one()
997 int page_mkclean(struct page *page) in page_mkclean() argument
1007 BUG_ON(!PageLocked(page)); in page_mkclean()
1009 if (!page_mapped(page)) in page_mkclean()
1012 mapping = page_mapping(page); in page_mkclean()
1016 rmap_walk(page, &rwc); in page_mkclean()
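page_mkclean() pairs with dirty accounting in writeback: it write-protects every mapping and reports whether any PTE was dirty or writable. The usual caller pattern (hedged, as in mainline clear_page_dirty_for_io()):

        /* returns nonzero if some mapping had to be cleaned/write-protected */
        if (page_mkclean(page))
                set_page_dirty(page);   /* the page really was dirtied via a PTE */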
1023 * page_move_anon_rmap - move a page to our anon_vma
1024 * @page: the page to move to our anon_vma
1025 * @vma: the vma the page belongs to
1027 * When a page belongs exclusively to one process after a COW event,
1028 * that page can be moved into the anon_vma that belongs to just that
1032 void page_move_anon_rmap(struct page *page, struct vm_area_struct *vma) in page_move_anon_rmap() argument
1036 page = compound_head(page); in page_move_anon_rmap()
1038 VM_BUG_ON_PAGE(!PageLocked(page), page); in page_move_anon_rmap()
1047 WRITE_ONCE(page->mapping, (struct address_space *) anon_vma); in page_move_anon_rmap()
1052 * @page: Page or Hugepage to add to rmap
1053 * @vma: VM area to add page to.
1055 * @exclusive: the page is exclusively owned by the current process
1057 static void __page_set_anon_rmap(struct page *page, in __page_set_anon_rmap() argument
1064 if (PageAnon(page)) in __page_set_anon_rmap()
1068 * If the page isn't exclusively mapped into this vma, in __page_set_anon_rmap()
1070 * page mapping! in __page_set_anon_rmap()
1076 page->mapping = (struct address_space *) anon_vma; in __page_set_anon_rmap()
1077 page->index = linear_page_index(vma, address); in __page_set_anon_rmap()
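The two stores above are the core of the anon rmap encoding: page->mapping carries the anon_vma pointer tagged with PAGE_MAPPING_ANON, and page->index holds the linear offset within the VMA. A sketch of the encode/decode pair, assuming the standard tagging constants:

        /* encode: tag the pointer so PageAnon()/page_anon_vma() can recognize it */
        page->mapping = (struct address_space *)((void *)anon_vma + PAGE_MAPPING_ANON);
        page->index = linear_page_index(vma, address);

        /* decode: recover the anon_vma from a mapped anonymous page */
        anon_vma = (struct anon_vma *)((unsigned long)page->mapping - PAGE_MAPPING_ANON);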
1082 * @page: the page to add the mapping to
1086 static void __page_check_anon_rmap(struct page *page, in __page_check_anon_rmap() argument
1090 * The page's anon-rmap details (mapping and index) are guaranteed to in __page_check_anon_rmap()
1094 * always holds the page locked, except if called from page_dup_rmap, in __page_check_anon_rmap()
1095 * in which case the page is already known to be setup. in __page_check_anon_rmap()
1101 VM_BUG_ON_PAGE(page_anon_vma(page)->root != vma->anon_vma->root, page); in __page_check_anon_rmap()
1102 VM_BUG_ON_PAGE(page_to_pgoff(page) != linear_page_index(vma, address), in __page_check_anon_rmap()
1103 page); in __page_check_anon_rmap()
1107 * page_add_anon_rmap - add pte mapping to an anonymous page
1108 * @page: the page to add the mapping to
1111 * @compound: charge the page as compound or small page
1113 * The caller needs to hold the pte lock, and the page must be locked in
1118 void page_add_anon_rmap(struct page *page, in page_add_anon_rmap() argument
1121 do_page_add_anon_rmap(page, vma, address, compound ? RMAP_COMPOUND : 0); in page_add_anon_rmap()
1129 void do_page_add_anon_rmap(struct page *page, in do_page_add_anon_rmap() argument
1136 if (unlikely(PageKsm(page))) in do_page_add_anon_rmap()
1137 lock_page_memcg(page); in do_page_add_anon_rmap()
1139 VM_BUG_ON_PAGE(!PageLocked(page), page); in do_page_add_anon_rmap()
1143 VM_BUG_ON_PAGE(!PageLocked(page), page); in do_page_add_anon_rmap()
1144 VM_BUG_ON_PAGE(!PageTransHuge(page), page); in do_page_add_anon_rmap()
1145 mapcount = compound_mapcount_ptr(page); in do_page_add_anon_rmap()
1148 trace_android_vh_update_page_mapcount(page, true, compound, in do_page_add_anon_rmap()
1151 first = atomic_inc_and_test(&page->_mapcount); in do_page_add_anon_rmap()
1155 int nr = compound ? thp_nr_pages(page) : 1; in do_page_add_anon_rmap()
1163 __inc_lruvec_page_state(page, NR_ANON_THPS); in do_page_add_anon_rmap()
1164 __mod_lruvec_page_state(page, NR_ANON_MAPPED, nr); in do_page_add_anon_rmap()
1167 if (unlikely(PageKsm(page))) { in do_page_add_anon_rmap()
1168 unlock_page_memcg(page); in do_page_add_anon_rmap()
1174 __page_set_anon_rmap(page, vma, address, in do_page_add_anon_rmap()
1177 __page_check_anon_rmap(page, vma, address); in do_page_add_anon_rmap()
1181 * __page_add_new_anon_rmap - add pte mapping to a new anonymous page
1182 * @page: the page to add the mapping to
1185 * @compound: charge the page as compound or small page
1189 * Page does not have to be locked.
1191 void __page_add_new_anon_rmap(struct page *page, in __page_add_new_anon_rmap() argument
1194 int nr = compound ? thp_nr_pages(page) : 1; in __page_add_new_anon_rmap()
1196 __SetPageSwapBacked(page); in __page_add_new_anon_rmap()
1198 VM_BUG_ON_PAGE(!PageTransHuge(page), page); in __page_add_new_anon_rmap()
1200 atomic_set(compound_mapcount_ptr(page), 0); in __page_add_new_anon_rmap()
1201 if (hpage_pincount_available(page)) in __page_add_new_anon_rmap()
1202 atomic_set(compound_pincount_ptr(page), 0); in __page_add_new_anon_rmap()
1204 __inc_lruvec_page_state(page, NR_ANON_THPS); in __page_add_new_anon_rmap()
1207 VM_BUG_ON_PAGE(PageTransCompound(page), page); in __page_add_new_anon_rmap()
1209 atomic_set(&page->_mapcount, 0); in __page_add_new_anon_rmap()
1211 __mod_lruvec_page_state(page, NR_ANON_MAPPED, nr); in __page_add_new_anon_rmap()
1212 __page_set_anon_rmap(page, vma, address, 1); in __page_add_new_anon_rmap()
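A brand-new anon page gets wired into the rmap before its PTE becomes visible, which is why it does not need to be locked here. A sketch of the typical fault-path ordering, modeled on mainline do_anonymous_page() and assuming the usual page_add_new_anon_rmap() wrapper around the __ variant above:

        /* page is freshly allocated and not yet mapped anywhere */
        inc_mm_counter_fast(vma->vm_mm, MM_ANONPAGES);
        page_add_new_anon_rmap(page, vma, vmf->address, false);
        lru_cache_add_inactive_or_unevictable(page, vma);

        /* only now publish the PTE, under the page table lock */
        set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);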
1216 * page_add_file_rmap - add pte mapping to a file page
1217 * @page: the page to add the mapping to
1218 * @compound: charge the page as compound or small page
1222 void page_add_file_rmap(struct page *page, bool compound) in page_add_file_rmap() argument
1228 VM_BUG_ON_PAGE(compound && !PageTransHuge(page), page); in page_add_file_rmap()
1229 lock_page_memcg(page); in page_add_file_rmap()
1230 if (compound && PageTransHuge(page)) { in page_add_file_rmap()
1231 for (i = 0, nr = 0; i < thp_nr_pages(page); i++) { in page_add_file_rmap()
1232 trace_android_vh_update_page_mapcount(&page[i], true, in page_add_file_rmap()
1238 if (atomic_inc_and_test(&page[i]._mapcount)) in page_add_file_rmap()
1242 if (!atomic_inc_and_test(compound_mapcount_ptr(page))) in page_add_file_rmap()
1244 if (PageSwapBacked(page)) in page_add_file_rmap()
1245 __inc_node_page_state(page, NR_SHMEM_PMDMAPPED); in page_add_file_rmap()
1247 __inc_node_page_state(page, NR_FILE_PMDMAPPED); in page_add_file_rmap()
1249 if (PageTransCompound(page) && page_mapping(page)) { in page_add_file_rmap()
1250 VM_WARN_ON_ONCE(!PageLocked(page)); in page_add_file_rmap()
1252 SetPageDoubleMap(compound_head(page)); in page_add_file_rmap()
1253 if (PageMlocked(page)) in page_add_file_rmap()
1254 clear_page_mlock(compound_head(page)); in page_add_file_rmap()
1256 trace_android_vh_update_page_mapcount(page, true, in page_add_file_rmap()
1262 if (!atomic_inc_and_test(&page->_mapcount)) in page_add_file_rmap()
1266 __mod_lruvec_page_state(page, NR_FILE_MAPPED, nr); in page_add_file_rmap()
1268 unlock_page_memcg(page); in page_add_file_rmap()
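File pages take the same step when a fault installs their PTE; roughly (a hedged sketch modeled on the mainline fault path, not code shown in these matches):

        inc_mm_counter_fast(vma->vm_mm, mm_counter_file(page));
        page_add_file_rmap(page, false);        /* small, non-compound mapping */
        set_pte_at(vma->vm_mm, vmf->address, vmf->pte, entry);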
1271 static void page_remove_file_rmap(struct page *page, bool compound) in page_remove_file_rmap() argument
1277 VM_BUG_ON_PAGE(compound && !PageHead(page), page); in page_remove_file_rmap()
1280 if (unlikely(PageHuge(page))) { in page_remove_file_rmap()
1282 atomic_dec(compound_mapcount_ptr(page)); in page_remove_file_rmap()
1286 /* page still mapped by someone else? */ in page_remove_file_rmap()
1287 if (compound && PageTransHuge(page)) { in page_remove_file_rmap()
1288 for (i = 0, nr = 0; i < thp_nr_pages(page); i++) { in page_remove_file_rmap()
1289 trace_android_vh_update_page_mapcount(&page[i], false, in page_remove_file_rmap()
1295 if (atomic_add_negative(-1, &page[i]._mapcount)) in page_remove_file_rmap()
1299 if (!atomic_add_negative(-1, compound_mapcount_ptr(page))) in page_remove_file_rmap()
1301 if (PageSwapBacked(page)) in page_remove_file_rmap()
1302 __dec_node_page_state(page, NR_SHMEM_PMDMAPPED); in page_remove_file_rmap()
1304 __dec_node_page_state(page, NR_FILE_PMDMAPPED); in page_remove_file_rmap()
1306 trace_android_vh_update_page_mapcount(page, false, in page_remove_file_rmap()
1312 if (!atomic_add_negative(-1, &page->_mapcount)) in page_remove_file_rmap()
1322 __mod_lruvec_page_state(page, NR_FILE_MAPPED, -nr); in page_remove_file_rmap()
1324 if (unlikely(PageMlocked(page))) in page_remove_file_rmap()
1325 clear_page_mlock(page); in page_remove_file_rmap()
1328 static void page_remove_anon_compound_rmap(struct page *page) in page_remove_anon_compound_rmap() argument
1334 if (!atomic_add_negative(-1, compound_mapcount_ptr(page))) in page_remove_anon_compound_rmap()
1338 if (unlikely(PageHuge(page))) in page_remove_anon_compound_rmap()
1344 __dec_lruvec_page_state(page, NR_ANON_THPS); in page_remove_anon_compound_rmap()
1346 if (TestClearPageDoubleMap(page)) { in page_remove_anon_compound_rmap()
1351 for (i = 0, nr = 0; i < thp_nr_pages(page); i++) { in page_remove_anon_compound_rmap()
1352 trace_android_vh_update_page_mapcount(&page[i], false, in page_remove_anon_compound_rmap()
1358 if (atomic_add_negative(-1, &page[i]._mapcount)) in page_remove_anon_compound_rmap()
1364 * Queue the page for deferred split if at least one small in page_remove_anon_compound_rmap()
1365 * page of the compound page is unmapped, but at least one in page_remove_anon_compound_rmap()
1366 * small page is still mapped. in page_remove_anon_compound_rmap()
1368 if (nr && nr < thp_nr_pages(page)) in page_remove_anon_compound_rmap()
1369 deferred_split_huge_page(page); in page_remove_anon_compound_rmap()
1371 nr = thp_nr_pages(page); in page_remove_anon_compound_rmap()
1374 if (unlikely(PageMlocked(page))) in page_remove_anon_compound_rmap()
1375 clear_page_mlock(page); in page_remove_anon_compound_rmap()
1378 __mod_lruvec_page_state(page, NR_ANON_MAPPED, -nr); in page_remove_anon_compound_rmap()
1382 * page_remove_rmap - take down pte mapping from a page
1383 * @page: page to remove mapping from
1384 * @compound: uncharge the page as compound or small page
1388 void page_remove_rmap(struct page *page, bool compound) in page_remove_rmap() argument
1392 lock_page_memcg(page); in page_remove_rmap()
1394 if (!PageAnon(page)) { in page_remove_rmap()
1395 page_remove_file_rmap(page, compound); in page_remove_rmap()
1400 page_remove_anon_compound_rmap(page); in page_remove_rmap()
1404 trace_android_vh_update_page_mapcount(page, false, in page_remove_rmap()
1410 /* page still mapped by someone else? */ in page_remove_rmap()
1411 if (!atomic_add_negative(-1, &page->_mapcount)) in page_remove_rmap()
1419 __dec_lruvec_page_state(page, NR_ANON_MAPPED); in page_remove_rmap()
1421 if (unlikely(PageMlocked(page))) in page_remove_rmap()
1422 clear_page_mlock(page); in page_remove_rmap()
1424 if (PageTransCompound(page)) in page_remove_rmap()
1425 deferred_split_huge_page(compound_head(page)); in page_remove_rmap()
1437 unlock_page_memcg(page); in page_remove_rmap()
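The inverse step happens whenever a PTE is torn down under the pte lock: clear the PTE, drop the mapcount, then drop the reference the mapping held (the try_to_unmap_one() matches further down show the same pairing). A minimal sketch:

        /* pte lock held; the PTE for this mapping has just been cleared */
        page_remove_rmap(page, false);  /* may have been the last mapping */
        put_page(page);                 /* reference the mapping held on the page */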
1443 static bool try_to_unmap_one(struct page *page, struct vm_area_struct *vma, in try_to_unmap_one() argument
1448 .page = page, in try_to_unmap_one()
1453 struct page *subpage; in try_to_unmap_one()
1462 * if page table locking is skipped: use TTU_SYNC to wait for that. in try_to_unmap_one()
1472 is_zone_device_page(page) && !is_device_private_page(page)) in try_to_unmap_one()
1477 flags & TTU_SPLIT_FREEZE, page); in try_to_unmap_one()
1485 * Note that the page cannot be freed in this function, as the caller of in try_to_unmap_one()
1486 * try_to_unmap() must hold a reference on the page. in try_to_unmap_one()
1488 range.end = PageKsm(page) ? in try_to_unmap_one()
1489 address + PAGE_SIZE : vma_address_end(page, vma); in try_to_unmap_one()
1492 if (PageHuge(page)) { in try_to_unmap_one()
1506 VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page); in try_to_unmap_one()
1508 set_pmd_migration_entry(&pvmw, page); in try_to_unmap_one()
1514 * If the page is mlock()d, we cannot swap it out. in try_to_unmap_one()
1521 if (!PageTransCompound(page)) { in try_to_unmap_one()
1526 mlock_vma_page(page); in try_to_unmap_one()
1537 VM_BUG_ON_PAGE(!pvmw.pte, page); in try_to_unmap_one()
1539 subpage = page - page_to_pfn(page) + pte_pfn(*pvmw.pte); in try_to_unmap_one()
1542 if (PageHuge(page) && !PageAnon(page)) { in try_to_unmap_one()
1552 * page. There is no way of knowing exactly in try_to_unmap_one()
1563 * The ref count of the PMD page was dropped in try_to_unmap_one()
1568 * unmap the actual page and drop map count in try_to_unmap_one()
1578 is_zone_device_page(page)) { in try_to_unmap_one()
1585 * Store the pfn of the page in a special migration in try_to_unmap_one()
1589 entry = make_migration_entry(page, 0); in try_to_unmap_one()
1593 * pteval maps a zone device page and is therefore in try_to_unmap_one()
1608 * migrated, just set it to page. This will need to be in try_to_unmap_one()
1612 subpage = page; in try_to_unmap_one()
1616 /* Nuke the page table entry. */ in try_to_unmap_one()
1621 * a remote CPU could still be writing to the page. in try_to_unmap_one()
1634 /* Move the dirty bit to the page. Now the pte is gone. */ in try_to_unmap_one()
1636 set_page_dirty(page); in try_to_unmap_one()
1641 if (PageHWPoison(page) && !(flags & TTU_IGNORE_HWPOISON)) { in try_to_unmap_one()
1643 if (PageHuge(page)) { in try_to_unmap_one()
1644 hugetlb_count_sub(compound_nr(page), mm); in try_to_unmap_one()
1649 dec_mm_counter(mm, mm_counter(page)); in try_to_unmap_one()
1655 * The guest indicated that the page content is of no in try_to_unmap_one()
1659 * page. When userfaultfd is active, we must not drop in try_to_unmap_one()
1660 * this page though, as its main user (postcopy in try_to_unmap_one()
1664 dec_mm_counter(mm, mm_counter(page)); in try_to_unmap_one()
1681 * Store the pfn of the page in a special migration in try_to_unmap_one()
1697 } else if (PageAnon(page)) { in try_to_unmap_one()
1704 if (unlikely(PageSwapBacked(page) != PageSwapCache(page))) { in try_to_unmap_one()
1714 /* MADV_FREE page check */ in try_to_unmap_one()
1715 if (!PageSwapBacked(page)) { in try_to_unmap_one()
1725 ref_count = page_ref_count(page); in try_to_unmap_one()
1726 map_count = page_mapcount(page); in try_to_unmap_one()
1729 * Order reads for page refcount and dirty flag in try_to_unmap_one()
1735 * The only page refs must be one from isolation in try_to_unmap_one()
1739 !PageDirty(page)) { in try_to_unmap_one()
1748 * If the page was redirtied, it cannot be in try_to_unmap_one()
1749 * discarded. Remap the page to page table. in try_to_unmap_one()
1752 SetPageSwapBacked(page); in try_to_unmap_one()
1789 * This is a locked file-backed page, thus it cannot in try_to_unmap_one()
1790 * be removed from the page cache and replaced by a new in try_to_unmap_one()
1791 * page before mmu_notifier_invalidate_range_end, so no in try_to_unmap_one()
1792 * concurrent thread might update its page table to in try_to_unmap_one()
1793 * point at new page while a device still is using this in try_to_unmap_one()
1794 * page. in try_to_unmap_one()
1798 dec_mm_counter(mm, mm_counter_file(page)); in try_to_unmap_one()
1803 * done above for all cases requiring it to happen under page in try_to_unmap_one()
1808 page_remove_rmap(subpage, PageHuge(page)); in try_to_unmap_one()
1809 put_page(page); in try_to_unmap_one()
1813 trace_android_vh_try_to_unmap_one(vma, page, address, ret); in try_to_unmap_one()
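The MADV_FREE (lazyfree) branch in these matches decides whether a clean anonymous page can be dropped without writing it to swap. A condensed, hedged reconstruction of that decision:

        if (!PageSwapBacked(page)) {
                int ref_count = page_ref_count(page);
                int map_count = page_mapcount(page);

                /* order the refcount read against the dirty-flag check */
                smp_rmb();

                /* only refs left: one per mapping plus the isolation reference */
                if (ref_count == 1 + map_count && !PageDirty(page)) {
                        dec_mm_counter(mm, MM_ANONPAGES);
                        goto discard;           /* drop the page, no swap entry */
                }

                /* redirtied after MADV_FREE: restore the PTE, back to swap-backed */
                set_pte_at(mm, address, pvmw.pte, pteval);
                SetPageSwapBacked(page);
                ret = false;                    /* tell the walk this page stays mapped */
        }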
1823 static int page_not_mapped(struct page *page) in page_not_mapped() argument
1825 return !page_mapped(page); in page_not_mapped()
1829 * try_to_unmap - try to remove all page table mappings to a page
1830 * @page: the page to get unmapped
1833 * Tries to remove all the page table entries which are mapping this
1834 * page, used in the pageout path. Caller must hold the page lock.
1838 bool try_to_unmap(struct page *page, enum ttu_flags flags) in try_to_unmap() argument
1850 * page tables leading to a race where migration cannot in try_to_unmap()
1856 && !PageKsm(page) && PageAnon(page)) in try_to_unmap()
1860 rmap_walk_locked(page, &rwc); in try_to_unmap()
1862 rmap_walk(page, &rwc); in try_to_unmap()
1868 * if page table locking is skipped: use TTU_SYNC to wait for that. in try_to_unmap()
1870 return !page_mapcount(page); in try_to_unmap()
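In the pageout path the boolean result simply answers "is the page fully unmapped now?"; a sketch of the reclaim-side call, modeled on mainline shrink_page_list() (the activate_locked label belongs to that caller, not to rmap):

        if (page_mapped(page)) {
                enum ttu_flags flags = TTU_BATCH_FLUSH;

                if (!try_to_unmap(page, flags)) {
                        /* some mapping could not be removed: keep the page */
                        goto activate_locked;
                }
        }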
1874 * try_to_munlock - try to munlock a page
1875 * @page: the page to be munlocked
1877 * Called from munlock code. Checks all of the VMAs mapping the page
1878 * to make sure nobody else has this page mlocked. The page will be
1882 void try_to_munlock(struct page *page) in try_to_munlock() argument
1892 VM_BUG_ON_PAGE(!PageLocked(page) || PageLRU(page), page); in try_to_munlock()
1893 VM_BUG_ON_PAGE(PageCompound(page) && PageDoubleMap(page), page); in try_to_munlock()
1895 rmap_walk(page, &rwc); in try_to_munlock()
1907 static struct anon_vma *rmap_walk_anon_lock(struct page *page, in rmap_walk_anon_lock() argument
1913 return rwc->anon_lock(page, rwc); in rmap_walk_anon_lock()
1921 anon_vma = page_anon_vma(page); in rmap_walk_anon_lock()
1940 * rmap_walk_anon - do something to anonymous page using the object-based
1942 * @page: the page to be handled
1945 * Find all the mappings of a page using the mapping pointer and the vma chains
1949 * where the page was found will be held for write. So, we won't recheck
1953 static void rmap_walk_anon(struct page *page, struct rmap_walk_control *rwc, in rmap_walk_anon() argument
1961 anon_vma = page_anon_vma(page); in rmap_walk_anon()
1963 VM_BUG_ON_PAGE(!anon_vma, page); in rmap_walk_anon()
1965 anon_vma = rmap_walk_anon_lock(page, rwc); in rmap_walk_anon()
1970 pgoff_start = page_to_pgoff(page); in rmap_walk_anon()
1971 pgoff_end = pgoff_start + thp_nr_pages(page) - 1; in rmap_walk_anon()
1975 unsigned long address = vma_address(page, vma); in rmap_walk_anon()
1983 if (!rwc->rmap_one(page, vma, address, rwc->arg)) in rmap_walk_anon()
1985 if (rwc->done && rwc->done(page)) in rmap_walk_anon()
1994 * rmap_walk_file - do something to file page using the object-based rmap method
1995 * @page: the page to be handled
1998 * Find all the mappings of a page using the mapping pointer and the vma chains
2002 * where the page was found will be held for write. So, we won't recheck
2006 static void rmap_walk_file(struct page *page, struct rmap_walk_control *rwc, in rmap_walk_file() argument
2009 struct address_space *mapping = page_mapping(page); in rmap_walk_file()
2015 * The page lock not only makes sure that page->mapping cannot in rmap_walk_file()
2020 VM_BUG_ON_PAGE(!PageLocked(page), page); in rmap_walk_file()
2025 pgoff_start = page_to_pgoff(page); in rmap_walk_file()
2026 pgoff_end = pgoff_start + thp_nr_pages(page) - 1; in rmap_walk_file()
2028 trace_android_vh_do_page_trylock(page, in rmap_walk_file()
2048 unsigned long address = vma_address(page, vma); in rmap_walk_file()
2056 if (!rwc->rmap_one(page, vma, address, rwc->arg)) in rmap_walk_file()
2058 if (rwc->done && rwc->done(page)) in rmap_walk_file()
2067 void rmap_walk(struct page *page, struct rmap_walk_control *rwc) in rmap_walk() argument
2069 if (unlikely(PageKsm(page))) in rmap_walk()
2070 rmap_walk_ksm(page, rwc); in rmap_walk()
2071 else if (PageAnon(page)) in rmap_walk()
2072 rmap_walk_anon(page, rwc, false); in rmap_walk()
2074 rmap_walk_file(page, rwc, false); in rmap_walk()
2078 void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc) in rmap_walk_locked() argument
2081 VM_BUG_ON_PAGE(PageKsm(page), page); in rmap_walk_locked()
2082 if (PageAnon(page)) in rmap_walk_locked()
2083 rmap_walk_anon(page, rwc, true); in rmap_walk_locked()
2085 rmap_walk_file(page, rwc, true); in rmap_walk_locked()
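All of the walkers above are driven through struct rmap_walk_control. A sketch of the pattern (walk_one and visit_all_mappings are hypothetical names; page_not_mapped is the rmap.c helper shown earlier, which try_to_unmap() uses as its done callback):

static bool walk_one(struct page *page, struct vm_area_struct *vma,
                     unsigned long address, void *arg)
{
        /* called once for every VMA that maps @page, at the mapped @address */
        return true;                    /* true: keep walking; false: stop early */
}

static void visit_all_mappings(struct page *page)
{
        struct rmap_walk_control rwc = {
                .rmap_one = walk_one,
                .done = page_not_mapped,        /* optional: stop once nothing maps the page */
        };

        rmap_walk(page, &rwc);          /* dispatches to the ksm, anon, or file walker */
}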
2094 void hugepage_add_anon_rmap(struct page *page, in hugepage_add_anon_rmap() argument
2100 BUG_ON(!PageLocked(page)); in hugepage_add_anon_rmap()
2103 first = atomic_inc_and_test(compound_mapcount_ptr(page)); in hugepage_add_anon_rmap()
2105 __page_set_anon_rmap(page, vma, address, 0); in hugepage_add_anon_rmap()
2108 void hugepage_add_new_anon_rmap(struct page *page, in hugepage_add_new_anon_rmap() argument
2112 atomic_set(compound_mapcount_ptr(page), 0); in hugepage_add_new_anon_rmap()
2113 if (hpage_pincount_available(page)) in hugepage_add_new_anon_rmap()
2114 atomic_set(compound_pincount_ptr(page), 0); in hugepage_add_new_anon_rmap()
2116 __page_set_anon_rmap(page, vma, address, 1); in hugepage_add_new_anon_rmap()