Lines Matching full:page
7 * Page migration was first developed in the context of the memory hotplug
63 int isolate_movable_page(struct page *page, isolate_mode_t mode) in isolate_movable_page() argument
71 * In case we 'win' a race for a movable page being freed under us and in isolate_movable_page()
74 * release this page, thus avoiding a nasty leakage. in isolate_movable_page()
76 if (unlikely(!get_page_unless_zero(page))) in isolate_movable_page()
80 * Check PageMovable before taking PG_lock, because the page's owner in isolate_movable_page()
81 * assumes that nobody touches the PG_lock of a newly allocated page, in isolate_movable_page()
82 * so unconditionally grabbing the lock would break the owner's assumptions. in isolate_movable_page()
84 if (unlikely(!__PageMovable(page))) in isolate_movable_page()
88 * compaction threads can race against page migration functions in isolate_movable_page()
89 * as well as race against the release of a page. in isolate_movable_page()
91 * In order to avoid having an already isolated movable page in isolate_movable_page()
94 * let's be sure we have the page lock in isolate_movable_page()
95 * before proceeding with the movable page isolation steps. in isolate_movable_page()
97 if (unlikely(!trylock_page(page))) in isolate_movable_page()
100 if (!PageMovable(page) || PageIsolated(page)) in isolate_movable_page()
103 mapping = page_mapping(page); in isolate_movable_page()
104 VM_BUG_ON_PAGE(!mapping, page); in isolate_movable_page()
106 if (!mapping->a_ops->isolate_page(page, mode)) in isolate_movable_page()
109 /* Driver shouldn't use PG_isolated bit of page->flags */ in isolate_movable_page()
110 WARN_ON_ONCE(PageIsolated(page)); in isolate_movable_page()
111 SetPageIsolated(page); in isolate_movable_page()
112 unlock_page(page); in isolate_movable_page()
117 unlock_page(page); in isolate_movable_page()
119 put_page(page); in isolate_movable_page()
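For context, what isolate_movable_page() enforces here is the non-LRU movable page contract: the page's owner publishes an address_space whose a_ops provide isolate_page/migratepage/putback_page and tags each page with __SetPageMovable(). A minimal sketch for a v5.10-era kernel, with hypothetical demo_* names standing in for a real owner such as zsmalloc or virtio-balloon:

    #include <linux/fs.h>
    #include <linux/migrate.h>
    #include <linux/mm.h>

    static bool demo_isolate_page(struct page *page, isolate_mode_t mode)
    {
            /* Called with the page locked; detach it from the owner's lists. */
            return true;
    }

    static int demo_migratepage(struct address_space *mapping,
                                struct page *newpage, struct page *page,
                                enum migrate_mode mode)
    {
            /* Copy contents and private state, then release the old page. */
            return MIGRATEPAGE_SUCCESS;
    }

    static void demo_putback_page(struct page *page)
    {
            /* Migration failed: reinsert the page into the owner's lists. */
    }

    static const struct address_space_operations demo_aops = {
            .isolate_page   = demo_isolate_page,
            .migratepage    = demo_migratepage,
            .putback_page   = demo_putback_page,
    };

    static void demo_mark_movable(struct page *page, struct address_space *mapping)
    {
            mapping->a_ops = &demo_aops;
            __SetPageMovable(page, mapping);   /* sets PAGE_MAPPING_MOVABLE */
    }

isolate_movable_page() then finds this mapping via page_mapping() and calls ->isolate_page() under the page lock, as the lines above show.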
124 /* It should be called on page which is PG_movable */
125 void putback_movable_page(struct page *page) in putback_movable_page() argument
129 VM_BUG_ON_PAGE(!PageLocked(page), page); in putback_movable_page()
130 VM_BUG_ON_PAGE(!PageMovable(page), page); in putback_movable_page()
131 VM_BUG_ON_PAGE(!PageIsolated(page), page); in putback_movable_page()
133 mapping = page_mapping(page); in putback_movable_page()
134 mapping->a_ops->putback_page(page); in putback_movable_page()
135 ClearPageIsolated(page); in putback_movable_page()
143 * built from LRU, balloon, or hugetlbfs pages. See isolate_migratepages_range()
148 struct page *page; in putback_movable_pages() local
149 struct page *page2; in putback_movable_pages()
151 list_for_each_entry_safe(page, page2, l, lru) { in putback_movable_pages()
152 if (unlikely(PageHuge(page))) { in putback_movable_pages()
153 putback_active_hugepage(page); in putback_movable_pages()
156 list_del(&page->lru); in putback_movable_pages()
158 * We isolated a non-LRU movable page, so here we can use in putback_movable_pages()
159 * __PageMovable because an LRU page's mapping cannot have in putback_movable_pages()
162 if (unlikely(__PageMovable(page))) { in putback_movable_pages()
163 VM_BUG_ON_PAGE(!PageIsolated(page), page); in putback_movable_pages()
164 lock_page(page); in putback_movable_pages()
165 if (PageMovable(page)) in putback_movable_pages()
166 putback_movable_page(page); in putback_movable_pages()
168 ClearPageIsolated(page); in putback_movable_pages()
169 unlock_page(page); in putback_movable_pages()
170 put_page(page); in putback_movable_pages()
172 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + in putback_movable_pages()
173 page_is_file_lru(page), -thp_nr_pages(page)); in putback_movable_pages()
174 putback_lru_page(page); in putback_movable_pages()
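The putback path above mirrors the isolation side. A sketch of that side, modeled on what memory offlining does (the helper name is hypothetical); note that the NR_ISOLATED_* accounting is applied only to LRU pages, matching the accounting reversal in putback_movable_pages():

    #include <linux/hugetlb.h>
    #include <linux/migrate.h>
    #include <linux/mm.h>
    #include <linux/swap.h>

    /* Caller holds a reference on @page, as isolate_lru_page() requires. */
    static int demo_isolate_for_migration(struct page *page,
                                          struct list_head *pagelist)
    {
            int ret;

            if (PageHuge(page))
                    return isolate_huge_page(compound_head(page), pagelist) ?
                            0 : -EBUSY;

            if (PageLRU(page))
                    ret = isolate_lru_page(page);
            else
                    ret = isolate_movable_page(page, ISOLATE_UNEVICTABLE);
            if (ret)
                    return ret;

            list_add_tail(&page->lru, pagelist);
            if (!__PageMovable(page))
                    inc_node_page_state(page, NR_ISOLATED_ANON +
                                        page_is_file_lru(page));
            return 0;
    }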
183 static bool remove_migration_pte(struct page *page, struct vm_area_struct *vma, in remove_migration_pte() argument
187 .page = old, in remove_migration_pte()
192 struct page *new; in remove_migration_pte()
196 VM_BUG_ON_PAGE(PageTail(page), page); in remove_migration_pte()
198 if (PageKsm(page)) in remove_migration_pte()
199 new = page; in remove_migration_pte()
201 new = page - pvmw.page->index + in remove_migration_pte()
207 VM_BUG_ON_PAGE(PageHuge(page) || !PageTransCompound(page), page); in remove_migration_pte()
258 if (PageTransHuge(page) && PageMlocked(page)) in remove_migration_pte()
259 clear_page_mlock(page); in remove_migration_pte()
270 * references to the indicated page.
272 void remove_migration_ptes(struct page *old, struct page *new, bool locked) in remove_migration_ptes()
286 * Something used the pte of a page under migration. We need to
287 * get to the page and wait until migration is finished.
295 struct page *page; in __migration_entry_wait() local
306 page = migration_entry_to_page(entry); in __migration_entry_wait()
307 page = compound_head(page); in __migration_entry_wait()
310 * Once page cache replacement of page migration started, page_count in __migration_entry_wait()
314 if (!get_page_unless_zero(page)) in __migration_entry_wait()
317 trace_android_vh_waiting_for_page_migration(page); in __migration_entry_wait()
318 put_and_wait_on_page_locked(page); in __migration_entry_wait()
343 struct page *page; in pmd_migration_entry_wait() local
348 page = migration_entry_to_page(pmd_to_swp_entry(*pmd)); in pmd_migration_entry_wait()
349 if (!get_page_unless_zero(page)) in pmd_migration_entry_wait()
352 put_and_wait_on_page_locked(page); in pmd_migration_entry_wait()
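These wait helpers are what a fault path lands in when it trips over a migration entry. A minimal sketch of that consumer side, mirroring the shape of do_swap_page() (the demo_ name is hypothetical; vmf->orig_pte is assumed to hold the offending pte):

    #include <linux/mm.h>
    #include <linux/swapops.h>

    static vm_fault_t demo_handle_nonpresent_pte(struct vm_fault *vmf)
    {
            swp_entry_t entry = pte_to_swp_entry(vmf->orig_pte);

            if (is_migration_entry(entry)) {
                    /* Sleeps in put_and_wait_on_page_locked(), as above. */
                    migration_entry_wait(vmf->vma->vm_mm, vmf->pmd, vmf->address);
                    return 0;       /* retry: the pte now maps the new page */
            }
            return VM_FAULT_SIGBUS; /* other entry types are out of scope here */
    }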
359 static int expected_page_refs(struct address_space *mapping, struct page *page) in expected_page_refs() argument
367 expected_count += is_device_private_page(page); in expected_page_refs()
369 expected_count += thp_nr_pages(page) + page_has_private(page); in expected_page_refs()
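A worked example of the count this helper produces (a sketch, assuming no other pins are outstanding): the base of 1 is the reference the migration caller took when isolating the page; a device-private ZONE_DEVICE page contributes one more; and if the page has a mapping, thp_nr_pages() covers the page-cache or swap-cache references (1 for a base page, 512 for a 2 MB THP) plus 1 when PagePrivate is set (for example attached buffer heads). A clean 4 KiB page-cache page is therefore expected at 1 + 1 = 2, the same page with buffer heads at 3, and a page-cache THP at 1 + 512 = 513; any higher page_count() makes the freeze in migrate_page_move_mapping() fail and the migration returns -EAGAIN.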
375 * Replace the page in the mapping.
383 struct page *newpage, struct page *page, int extra_count) in migrate_page_move_mapping() argument
385 XA_STATE(xas, &mapping->i_pages, page_index(page)); in migrate_page_move_mapping()
388 int expected_count = expected_page_refs(mapping, page) + extra_count; in migrate_page_move_mapping()
389 int nr = thp_nr_pages(page); in migrate_page_move_mapping()
392 /* Anonymous page without mapping */ in migrate_page_move_mapping()
393 if (page_count(page) != expected_count) in migrate_page_move_mapping()
397 newpage->index = page->index; in migrate_page_move_mapping()
398 newpage->mapping = page->mapping; in migrate_page_move_mapping()
399 if (PageSwapBacked(page)) in migrate_page_move_mapping()
405 oldzone = page_zone(page); in migrate_page_move_mapping()
409 if (page_count(page) != expected_count || xas_load(&xas) != page) { in migrate_page_move_mapping()
414 if (!page_ref_freeze(page, expected_count)) { in migrate_page_move_mapping()
420 * Now we know that no one else is looking at the page: in migrate_page_move_mapping()
423 newpage->index = page->index; in migrate_page_move_mapping()
424 newpage->mapping = page->mapping; in migrate_page_move_mapping()
426 if (PageSwapBacked(page)) { in migrate_page_move_mapping()
428 if (PageSwapCache(page)) { in migrate_page_move_mapping()
430 set_page_private(newpage, page_private(page)); in migrate_page_move_mapping()
433 VM_BUG_ON_PAGE(PageSwapCache(page), page); in migrate_page_move_mapping()
436 /* Move dirty while page refs frozen and newpage not yet exposed */ in migrate_page_move_mapping()
437 dirty = PageDirty(page); in migrate_page_move_mapping()
439 ClearPageDirty(page); in migrate_page_move_mapping()
444 if (PageTransHuge(page)) { in migrate_page_move_mapping()
454 * Drop cache reference from old page by unfreezing in migrate_page_move_mapping()
458 page_ref_unfreeze(page, expected_count - nr); in migrate_page_move_mapping()
465 * the page for that zone. Other VM counters will be in migrate_page_move_mapping()
467 * new page and drop references to the old page. in migrate_page_move_mapping()
477 memcg = page_memcg(page); in migrate_page_move_mapping()
483 if (PageSwapBacked(page) && !PageSwapCache(page)) { in migrate_page_move_mapping()
505 struct page *newpage, struct page *page) in migrate_huge_page_move_mapping() argument
507 XA_STATE(xas, &mapping->i_pages, page_index(page)); in migrate_huge_page_move_mapping()
511 expected_count = 2 + page_has_private(page); in migrate_huge_page_move_mapping()
512 if (page_count(page) != expected_count || xas_load(&xas) != page) { in migrate_huge_page_move_mapping()
517 if (!page_ref_freeze(page, expected_count)) { in migrate_huge_page_move_mapping()
522 newpage->index = page->index; in migrate_huge_page_move_mapping()
523 newpage->mapping = page->mapping; in migrate_huge_page_move_mapping()
529 page_ref_unfreeze(page, expected_count - 1); in migrate_huge_page_move_mapping()
537 * Gigantic pages are so large that we do not guarantee that page++ pointer
538 * arithmetic will work across the entire page. We need something more
541 static void __copy_gigantic_page(struct page *dst, struct page *src, in __copy_gigantic_page()
545 struct page *dst_base = dst; in __copy_gigantic_page()
546 struct page *src_base = src; in __copy_gigantic_page()
558 static void copy_huge_page(struct page *dst, struct page *src) in copy_huge_page()
564 /* hugetlbfs page */ in copy_huge_page()
573 /* thp page */ in copy_huge_page()
585 * Copy the page to its new location
587 void migrate_page_states(struct page *newpage, struct page *page) in migrate_page_states() argument
591 trace_android_vh_migrate_page_states(page, newpage); in migrate_page_states()
593 if (PageError(page)) in migrate_page_states()
595 if (PageReferenced(page)) in migrate_page_states()
597 if (PageUptodate(page)) in migrate_page_states()
599 if (TestClearPageActive(page)) { in migrate_page_states()
600 VM_BUG_ON_PAGE(PageUnevictable(page), page); in migrate_page_states()
602 } else if (TestClearPageUnevictable(page)) in migrate_page_states()
604 if (PageWorkingset(page)) in migrate_page_states()
606 if (PageChecked(page)) in migrate_page_states()
608 if (PageMappedToDisk(page)) in migrate_page_states()
610 trace_android_vh_look_around_migrate_page(page, newpage); in migrate_page_states()
613 if (PageDirty(page)) in migrate_page_states()
616 if (page_is_young(page)) in migrate_page_states()
618 if (page_is_idle(page)) in migrate_page_states()
622 * Copy NUMA information to the new page, to prevent over-eager in migrate_page_states()
623 * future migrations of this same page. in migrate_page_states()
625 cpupid = page_cpupid_xchg_last(page, -1); in migrate_page_states()
628 ksm_migrate_page(newpage, page); in migrate_page_states()
633 if (PageSwapCache(page)) in migrate_page_states()
634 ClearPageSwapCache(page); in migrate_page_states()
635 ClearPagePrivate(page); in migrate_page_states()
636 set_page_private(page, 0); in migrate_page_states()
639 * If any waiters have accumulated on the new page then in migrate_page_states()
650 if (PageReadahead(page)) in migrate_page_states()
653 copy_page_owner(page, newpage); in migrate_page_states()
655 if (!PageHuge(page)) in migrate_page_states()
656 mem_cgroup_migrate(page, newpage); in migrate_page_states()
660 void migrate_page_copy(struct page *newpage, struct page *page) in migrate_page_copy() argument
662 if (PageHuge(page) || PageTransHuge(page)) in migrate_page_copy()
663 copy_huge_page(newpage, page); in migrate_page_copy()
665 copy_highpage(newpage, page); in migrate_page_copy()
667 migrate_page_states(newpage, page); in migrate_page_copy()
676 * Common logic to directly migrate a single LRU page suitable for
682 struct page *newpage, struct page *page, in migrate_page() argument
687 BUG_ON(PageWriteback(page)); /* Writeback must be complete */ in migrate_page()
689 rc = migrate_page_move_mapping(mapping, newpage, page, 0); in migrate_page()
695 migrate_page_copy(newpage, page); in migrate_page()
697 migrate_page_states(newpage, page); in migrate_page()
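migrate_page() is also the template for filesystem and driver ->migratepage() implementations that need to move private state between the two steps. A sketch of that shape (hypothetical demo_ name; a real user such as iomap_migrate_page() transfers its page-private data where the placeholder comment sits):

    #include <linux/fs.h>
    #include <linux/migrate.h>

    static int demo_migratepage(struct address_space *mapping,
                                struct page *newpage, struct page *page,
                                enum migrate_mode mode)
    {
            int rc;

            rc = migrate_page_move_mapping(mapping, newpage, page, 0);
            if (rc != MIGRATEPAGE_SUCCESS)
                    return rc;

            /* ...move any mapping-private state from page to newpage... */

            if (mode != MIGRATE_SYNC_NO_COPY)
                    migrate_page_copy(newpage, page);
            else
                    migrate_page_states(newpage, page);
            return MIGRATEPAGE_SUCCESS;
    }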
742 struct page *newpage, struct page *page, enum migrate_mode mode, in __buffer_migrate_page() argument
749 if (!page_has_buffers(page)) in __buffer_migrate_page()
750 return migrate_page(mapping, newpage, page, mode); in __buffer_migrate_page()
752 /* Check whether page does not have extra refs before we do more work */ in __buffer_migrate_page()
753 expected_count = expected_page_refs(mapping, page); in __buffer_migrate_page()
754 if (page_count(page) != expected_count) in __buffer_migrate_page()
757 head = page_buffers(page); in __buffer_migrate_page()
788 rc = migrate_page_move_mapping(mapping, newpage, page, 0); in __buffer_migrate_page()
792 attach_page_private(newpage, detach_page_private(page)); in __buffer_migrate_page()
802 migrate_page_copy(newpage, page); in __buffer_migrate_page()
804 migrate_page_states(newpage, page); in __buffer_migrate_page()
822 * if the underlying filesystem guarantees that no other references to "page"
823 * exist. For example attached buffer heads are accessed only under page lock.
826 struct page *newpage, struct page *page, enum migrate_mode mode) in buffer_migrate_page() argument
828 return __buffer_migrate_page(mapping, newpage, page, mode, false); in buffer_migrate_page()
839 struct page *newpage, struct page *page, enum migrate_mode mode) in buffer_migrate_page_norefs() argument
841 return __buffer_migrate_page(mapping, newpage, page, mode, true); in buffer_migrate_page_norefs()
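The two buffer-head variants differ only in whether extra references to the page are tolerated while the buffers are moved. A sketch of how they are typically wired up (abbreviated address_space_operations, not copied from any real filesystem; in mainline the block-device mapping is the _norefs user):

    #include <linux/fs.h>

    static const struct address_space_operations demo_ext_like_aops = {
            /* ...read/write callbacks elided... */
    #ifdef CONFIG_MIGRATION
            .migratepage    = buffer_migrate_page,          /* extra refs allowed */
    #endif
    };

    static const struct address_space_operations demo_blkdev_like_aops = {
            /* ...read/write callbacks elided... */
    #ifdef CONFIG_MIGRATION
            .migratepage    = buffer_migrate_page_norefs,   /* extra refs refused */
    #endif
    };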
846 * Writeback a page to clean the dirty state
848 static int writeout(struct address_space *mapping, struct page *page) in writeout() argument
863 if (!clear_page_dirty_for_io(page)) in writeout()
868 * A dirty page may imply that the underlying filesystem has in writeout()
869 * the page on some queue. So the page must be clean for in writeout()
871 * page state is no longer what we checked for earlier. in writeout()
875 remove_migration_ptes(page, page, false); in writeout()
877 rc = mapping->a_ops->writepage(page, &wbc); in writeout()
881 lock_page(page); in writeout()
890 struct page *newpage, struct page *page, enum migrate_mode mode) in fallback_migrate_page() argument
892 if (PageDirty(page)) { in fallback_migrate_page()
901 return writeout(mapping, page); in fallback_migrate_page()
908 if (page_has_private(page) && in fallback_migrate_page()
909 !try_to_release_page(page, GFP_KERNEL)) in fallback_migrate_page()
912 return migrate_page(mapping, newpage, page, mode); in fallback_migrate_page()
916 * Move a page to a newly allocated page
917 * The page is locked and all ptes have been successfully removed.
919 * The new page will have replaced the old page if this function
926 static int move_to_new_page(struct page *newpage, struct page *page, in move_to_new_page() argument
931 bool is_lru = !__PageMovable(page); in move_to_new_page()
933 VM_BUG_ON_PAGE(!PageLocked(page), page); in move_to_new_page()
936 mapping = page_mapping(page); in move_to_new_page()
940 rc = migrate_page(mapping, newpage, page, mode); in move_to_new_page()
947 * for page migration. in move_to_new_page()
950 page, mode); in move_to_new_page()
953 page, mode); in move_to_new_page()
956 * In case of non-lru page, it could be released after in move_to_new_page()
959 VM_BUG_ON_PAGE(!PageIsolated(page), page); in move_to_new_page()
960 if (!PageMovable(page)) { in move_to_new_page()
962 ClearPageIsolated(page); in move_to_new_page()
967 page, mode); in move_to_new_page()
969 !PageIsolated(page)); in move_to_new_page()
973 * When successful, old pagecache page->mapping must be cleared before in move_to_new_page()
974 * page is freed; but stats require that PageAnon be left as PageAnon. in move_to_new_page()
977 if (__PageMovable(page)) { in move_to_new_page()
978 VM_BUG_ON_PAGE(!PageIsolated(page), page); in move_to_new_page()
982 * cannot try to migrate this page. in move_to_new_page()
984 ClearPageIsolated(page); in move_to_new_page()
988 * Anonymous and movable page->mapping will be cleared by in move_to_new_page()
992 if (!PageMappingFlags(page)) in move_to_new_page()
993 page->mapping = NULL; in move_to_new_page()
1006 static int __unmap_and_move(struct page *page, struct page *newpage, in __unmap_and_move() argument
1012 bool is_lru = !__PageMovable(page); in __unmap_and_move()
1014 if (!trylock_page(page)) { in __unmap_and_move()
1020 * For example, during page readahead pages are added locked in __unmap_and_move()
1025 * second or third page, the process can end up locking in __unmap_and_move()
1026 * the same page twice and deadlocking. Rather than in __unmap_and_move()
1034 lock_page(page); in __unmap_and_move()
1037 if (PageWriteback(page)) { in __unmap_and_move()
1054 wait_on_page_writeback(page); in __unmap_and_move()
1058 * By try_to_unmap(), page->mapcount goes down to 0 here. In this case, in __unmap_and_move()
1059 * we cannot notice that anon_vma is freed while we migrate a page. in __unmap_and_move()
1063 * just care Anon page here. in __unmap_and_move()
1068 * because that implies that the anon page is no longer mapped in __unmap_and_move()
1069 * (and cannot be remapped so long as we hold the page lock). in __unmap_and_move()
1071 if (PageAnon(page) && !PageKsm(page)) in __unmap_and_move()
1072 anon_vma = page_get_anon_vma(page); in __unmap_and_move()
1075 * Block others from accessing the new page when we get around to in __unmap_and_move()
1086 rc = move_to_new_page(newpage, page, mode); in __unmap_and_move()
1092 * 1. When a new swap-cache page is read into, it is added to the LRU in __unmap_and_move()
1094 * Calling try_to_unmap() against a page->mapping==NULL page will in __unmap_and_move()
1096 * 2. An orphaned page (see truncate_complete_page) might have in __unmap_and_move()
1097 * fs-private metadata. The page can be picked up due to memory in __unmap_and_move()
1098 * offlining. Everywhere else except page reclaim, the page is in __unmap_and_move()
1099 * invisible to the vm, so the page can not be migrated. So try to in __unmap_and_move()
1100 * free the metadata, so the page can be freed. in __unmap_and_move()
1102 if (!page->mapping) { in __unmap_and_move()
1103 VM_BUG_ON_PAGE(PageAnon(page), page); in __unmap_and_move()
1104 if (page_has_private(page)) { in __unmap_and_move()
1105 try_to_free_buffers(page); in __unmap_and_move()
1108 } else if (page_mapped(page)) { in __unmap_and_move()
1110 VM_BUG_ON_PAGE(PageAnon(page) && !PageKsm(page) && !anon_vma, in __unmap_and_move()
1111 page); in __unmap_and_move()
1112 try_to_unmap(page, TTU_MIGRATION|TTU_IGNORE_MLOCK); in __unmap_and_move()
1116 if (!page_mapped(page)) in __unmap_and_move()
1117 rc = move_to_new_page(newpage, page, mode); in __unmap_and_move()
1120 remove_migration_ptes(page, in __unmap_and_move()
1121 rc == MIGRATEPAGE_SUCCESS ? newpage : page, false); in __unmap_and_move()
1129 unlock_page(page); in __unmap_and_move()
1133 * which will not free the page because the new page owner has increased in __unmap_and_move()
1134 * the refcount. Also, if it is an LRU page, add the page back to the LRU in __unmap_and_move()
1135 * list here. Use the old state of the isolated source page to in __unmap_and_move()
1136 * determine if we migrated a LRU page. newpage was already unlocked in __unmap_and_move()
1137 * and possibly modified by its owner - don't rely on the page in __unmap_and_move()
1151 * Obtain the lock on page, remove all ptes and migrate the page
1152 * to the newly allocated page in newpage.
1156 unsigned long private, struct page *page, in unmap_and_move() argument
1161 struct page *newpage = NULL; in unmap_and_move()
1163 if (!thp_migration_supported() && PageTransHuge(page)) in unmap_and_move()
1166 if (page_count(page) == 1) { in unmap_and_move()
1167 /* page was freed from under us. So we are done. */ in unmap_and_move()
1168 ClearPageActive(page); in unmap_and_move()
1169 ClearPageUnevictable(page); in unmap_and_move()
1170 if (unlikely(__PageMovable(page))) { in unmap_and_move()
1171 lock_page(page); in unmap_and_move()
1172 if (!PageMovable(page)) in unmap_and_move()
1173 ClearPageIsolated(page); in unmap_and_move()
1174 unlock_page(page); in unmap_and_move()
1179 newpage = get_new_page(page, private); in unmap_and_move()
1183 rc = __unmap_and_move(page, newpage, force, mode); in unmap_and_move()
1190 * A page that has been migrated has all references in unmap_and_move()
1191 * removed and will be freed. A page that has not been in unmap_and_move()
1194 list_del(&page->lru); in unmap_and_move()
1201 if (likely(!__PageMovable(page))) in unmap_and_move()
1202 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + in unmap_and_move()
1203 page_is_file_lru(page), -thp_nr_pages(page)); in unmap_and_move()
1208 * isolation. Otherwise, restore the page to the right list unless in unmap_and_move()
1214 * We release the page in page_handle_poison. in unmap_and_move()
1216 put_page(page); in unmap_and_move()
1219 if (likely(!__PageMovable(page))) { in unmap_and_move()
1220 putback_lru_page(page); in unmap_and_move()
1224 lock_page(page); in unmap_and_move()
1225 if (PageMovable(page)) in unmap_and_move()
1226 putback_movable_page(page); in unmap_and_move()
1228 ClearPageIsolated(page); in unmap_and_move()
1229 unlock_page(page); in unmap_and_move()
1230 put_page(page); in unmap_and_move()
1250 * count of the head page (i.e. if all subpages of a 2MB hugepage are
1251 * under direct I/O, the reference of the head page is 512 and a bit more.)
1256 * There is also no race when direct I/O is issued on the page under migration,
1258 * will wait in the page fault for migration to complete.
1262 struct page *hpage, int force, in unmap_and_move_huge_page()
1267 struct page *new_hpage; in unmap_and_move_huge_page()
1274 * like soft offline and memory hotremove don't walk through page in unmap_and_move_huge_page()
1302 * page_mapping() set, hugetlbfs specific move page routine will not in unmap_and_move_huge_page()
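To make the direct I/O refcount arithmetic mentioned above (the "512 and a bit more") concrete, a rough sketch: get_user_pages() for direct I/O takes one reference per 4 KiB subpage, and for a hugetlb page all of them land on the head page, so a fully pinned 2 MB hugepage shows a head-page page_count() of roughly 512 plus the mapping/pool reference and the isolation reference. The freeze against the much smaller expected count in migrate_huge_page_move_mapping() then fails, so the migration is retried or abandoned rather than racing with the in-flight I/O.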
1382 * supplied as the target for the page migration
1386 * as the target of the page migration.
1391 * page migration, if any.
1392 * @reason: The reason for page migration.
1414 struct page *page; in migrate_pages() local
1415 struct page *page2; in migrate_pages()
1428 list_for_each_entry_safe(page, page2, from, lru) { in migrate_pages()
1431 * THP statistics are based on the source huge page. in migrate_pages()
1435 is_thp = PageTransHuge(page) && !PageHuge(page); in migrate_pages()
1436 nr_subpages = thp_nr_pages(page); in migrate_pages()
1439 if (PageHuge(page)) in migrate_pages()
1441 put_new_page, private, page, in migrate_pages()
1445 private, page, pass > 2, mode, in migrate_pages()
1453 * retry on the same page with the THP split in migrate_pages()
1456 * Head page is retried immediately and tail in migrate_pages()
1462 lock_page(page); in migrate_pages()
1463 rc = split_huge_page_to_list(page, from); in migrate_pages()
1464 unlock_page(page); in migrate_pages()
1466 list_safe_reset_next(page, page2, lru); in migrate_pages()
1495 * unlike -EAGAIN case, the failed page is in migrate_pages()
1496 * removed from migration page list and not in migrate_pages()
1528 struct page *alloc_migration_target(struct page *page, unsigned long private) in alloc_migration_target() argument
1533 struct page *new_page = NULL; in alloc_migration_target()
1541 nid = page_to_nid(page); in alloc_migration_target()
1543 if (PageHuge(page)) { in alloc_migration_target()
1544 struct hstate *h = page_hstate(compound_head(page)); in alloc_migration_target()
1550 if (PageTransHuge(page)) { in alloc_migration_target()
1559 zidx = zone_idx(page_zone(page)); in alloc_migration_target()
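alloc_migration_target() is the stock new_page_t callback for migrate_pages(). A sketch of the hookup as core-mm callers use it (struct migration_target_control lives in mm/internal.h, so this pattern applies to code inside mm/; the gfp mask and reason below are illustrative):

    #include <linux/migrate.h>
    #include "internal.h"           /* struct migration_target_control */

    static int demo_migrate_list_to_node(struct list_head *pagelist, int nid)
    {
            struct migration_target_control mtc = {
                    .nid      = nid,
                    .gfp_mask = GFP_USER | __GFP_MOVABLE | __GFP_RETRY_MAYFAIL,
            };
            int ret;

            ret = migrate_pages(pagelist, alloc_migration_target, NULL,
                                (unsigned long)&mtc, MIGRATE_SYNC,
                                MR_MEMORY_HOTPLUG);
            if (ret)        /* pages left over or an error: hand them back */
                    putback_movable_pages(pagelist);
            return ret;
    }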
1601 * Resolves the given address to a struct page, isolates it from the LRU and
1604 * errno - if the page cannot be found/isolated
1613 struct page *page; in add_page_for_migration() local
1625 page = follow_page(vma, addr, follflags); in add_page_for_migration()
1627 err = PTR_ERR(page); in add_page_for_migration()
1628 if (IS_ERR(page)) in add_page_for_migration()
1632 if (!page) in add_page_for_migration()
1636 if (page_to_nid(page) == node) in add_page_for_migration()
1640 if (page_mapcount(page) > 1 && !migrate_all) in add_page_for_migration()
1643 if (PageHuge(page)) { in add_page_for_migration()
1644 if (PageHead(page)) { in add_page_for_migration()
1645 isolate_huge_page(page, pagelist); in add_page_for_migration()
1649 struct page *head; in add_page_for_migration()
1651 head = compound_head(page); in add_page_for_migration()
1665 * isolate_lru_page() or drop the page ref if it was in add_page_for_migration()
1668 put_user_page(page); in add_page_for_migration()
1701 * Migrate an array of page addresses onto an array of nodes and fill
1752 * Errors in the page lookup or isolation are not fatal and we simply in do_pages_move()
1759 /* The page is successfully queued for migration */ in do_pages_move()
1764 * If the page is already on the target node (!err), store the in do_pages_move()
1801 struct page *page; in do_pages_stat_array() local
1809 page = follow_page(vma, addr, FOLL_DUMP); in do_pages_stat_array()
1811 err = PTR_ERR(page); in do_pages_stat_array()
1812 if (IS_ERR(page)) in do_pages_stat_array()
1815 err = page ? page_to_nid(page) : -ENOENT; in do_pages_stat_array()
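From userspace, both of these paths are reached through the move_pages(2) syscall; passing a NULL nodes array turns the call into the status-only query served by do_pages_stat_array(). A sketch in userspace C against libnuma's numaif.h (error handling trimmed):

    #define _GNU_SOURCE
    #include <numaif.h>             /* move_pages(), MPOL_MF_MOVE */
    #include <stdio.h>

    /* Ask the kernel to move one page of the calling process to @node. */
    static int demo_move_one_page(void *addr, int node)
    {
            void *pages[1]  = { addr };
            int   nodes[1]  = { node };
            int   status[1] = { -1 };

            /* pid 0 means the calling process; MPOL_MF_MOVE skips shared pages. */
            if (move_pages(0, 1, pages, nodes, status, MPOL_MF_MOVE) < 0) {
                    perror("move_pages");
                    return -1;
            }
            /* status[0] is the node the page ended up on, or a negative errno. */
            printf("page %p: status %d\n", addr, status[0]);
            return 0;
    }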
1998 static struct page *alloc_misplaced_dst_page(struct page *page, in alloc_misplaced_dst_page() argument
2002 struct page *newpage; in alloc_misplaced_dst_page()
2013 static int numamigrate_isolate_page(pg_data_t *pgdat, struct page *page) in numamigrate_isolate_page() argument
2017 VM_BUG_ON_PAGE(compound_order(page) && !PageTransHuge(page), page); in numamigrate_isolate_page()
2020 if (!migrate_balanced_pgdat(pgdat, compound_nr(page))) in numamigrate_isolate_page()
2023 if (isolate_lru_page(page)) in numamigrate_isolate_page()
2027 * migrate_misplaced_transhuge_page() skips page migration's usual in numamigrate_isolate_page()
2028 * check on page_count(), so we must do it here, now that the page in numamigrate_isolate_page()
2030 * The expected page count is 3: 1 for page's mapcount and 1 for the in numamigrate_isolate_page()
2033 if (PageTransHuge(page) && page_count(page) != 3) { in numamigrate_isolate_page()
2034 putback_lru_page(page); in numamigrate_isolate_page()
2038 page_lru = page_is_file_lru(page); in numamigrate_isolate_page()
2039 mod_node_page_state(page_pgdat(page), NR_ISOLATED_ANON + page_lru, in numamigrate_isolate_page()
2040 thp_nr_pages(page)); in numamigrate_isolate_page()
2043 * Isolating the page has taken another reference, so the in numamigrate_isolate_page()
2044 * caller's reference can be safely dropped without the page in numamigrate_isolate_page()
2047 put_page(page); in numamigrate_isolate_page()
2053 struct page *page = pmd_page(pmd); in pmd_trans_migrating() local
2054 return PageLocked(page); in pmd_trans_migrating()
2058 * Attempt to migrate a misplaced page to the specified destination
2060 * the page that will be dropped by this function before returning.
2062 int migrate_misplaced_page(struct page *page, struct vm_fault *vmf, in migrate_misplaced_page() argument
2074 if (page_mapcount(page) != 1 && page_is_file_lru(page) && in migrate_misplaced_page()
2082 if (page_is_file_lru(page) && PageDirty(page)) in migrate_misplaced_page()
2085 isolated = numamigrate_isolate_page(pgdat, page); in migrate_misplaced_page()
2089 list_add(&page->lru, &migratepages); in migrate_misplaced_page()
2095 list_del(&page->lru); in migrate_misplaced_page()
2096 dec_node_page_state(page, NR_ISOLATED_ANON + in migrate_misplaced_page()
2097 page_is_file_lru(page)); in migrate_misplaced_page()
2098 putback_lru_page(page); in migrate_misplaced_page()
2107 put_page(page); in migrate_misplaced_page()
2114 * Migrates a THP to a given target node. page must be locked and is unlocked
2121 struct page *page, int node) in migrate_misplaced_transhuge_page() argument
2126 struct page *new_page = NULL; in migrate_misplaced_transhuge_page()
2127 int page_lru = page_is_file_lru(page); in migrate_misplaced_transhuge_page()
2137 isolated = numamigrate_isolate_page(pgdat, page); in migrate_misplaced_transhuge_page()
2143 /* Prepare a page as a migration target */ in migrate_misplaced_transhuge_page()
2145 if (PageSwapBacked(page)) in migrate_misplaced_transhuge_page()
2148 /* anon mapping, we can simply copy page->mapping to the new page: */ in migrate_misplaced_transhuge_page()
2149 new_page->mapping = page->mapping; in migrate_misplaced_transhuge_page()
2150 new_page->index = page->index; in migrate_misplaced_transhuge_page()
2153 migrate_page_copy(new_page, page); in migrate_misplaced_transhuge_page()
2158 if (unlikely(!pmd_same(*pmd, entry) || !page_ref_freeze(page, 2))) { in migrate_misplaced_transhuge_page()
2163 SetPageActive(page); in migrate_misplaced_transhuge_page()
2165 SetPageUnevictable(page); in migrate_misplaced_transhuge_page()
2171 get_page(page); in migrate_misplaced_transhuge_page()
2172 putback_lru_page(page); in migrate_misplaced_transhuge_page()
2173 mod_node_page_state(page_pgdat(page), in migrate_misplaced_transhuge_page()
2185 * page blocking on the page lock, block on the page table in migrate_misplaced_transhuge_page()
2186 * lock or observe the new page. The SetPageUptodate on the in migrate_misplaced_transhuge_page()
2187 * new page and page_add_new_anon_rmap guarantee the copy is in migrate_misplaced_transhuge_page()
2205 page_ref_unfreeze(page, 2); in migrate_misplaced_transhuge_page()
2206 mlock_migrate_page(new_page, page); in migrate_misplaced_transhuge_page()
2207 page_remove_rmap(page, true); in migrate_misplaced_transhuge_page()
2212 /* Take an "isolate" reference and put new page on the LRU. */ in migrate_misplaced_transhuge_page()
2217 unlock_page(page); in migrate_misplaced_transhuge_page()
2218 put_page(page); /* Drop the rmap reference */ in migrate_misplaced_transhuge_page()
2219 put_page(page); /* Drop the LRU isolation reference */ in migrate_misplaced_transhuge_page()
2224 mod_node_page_state(page_pgdat(page), in migrate_misplaced_transhuge_page()
2240 unlock_page(page); in migrate_misplaced_transhuge_page()
2241 put_page(page); in migrate_misplaced_transhuge_page()
2309 struct page *page; in migrate_vma_collect_pmd() local
2317 page = pmd_page(*pmdp); in migrate_vma_collect_pmd()
2318 if (is_huge_zero_page(page)) { in migrate_vma_collect_pmd()
2327 get_page(page); in migrate_vma_collect_pmd()
2329 if (unlikely(!trylock_page(page))) in migrate_vma_collect_pmd()
2332 ret = split_huge_page(page); in migrate_vma_collect_pmd()
2333 unlock_page(page); in migrate_vma_collect_pmd()
2334 put_page(page); in migrate_vma_collect_pmd()
2352 struct page *page; in migrate_vma_collect_pmd() local
2368 * We only care about the special page table entries of in migrate_vma_collect_pmd()
2369 * unaddressable device pages. Other special swap entries are not in migrate_vma_collect_pmd()
2370 * migratable, and we ignore regular swapped pages. in migrate_vma_collect_pmd()
2376 page = device_private_entry_to_page(entry); in migrate_vma_collect_pmd()
2379 page->pgmap->owner != migrate->pgmap_owner) in migrate_vma_collect_pmd()
2382 mpfn = migrate_pfn(page_to_pfn(page)) | in migrate_vma_collect_pmd()
2395 page = vm_normal_page(migrate->vma, addr, pte); in migrate_vma_collect_pmd()
2401 if (!page || !page->mapping || PageTransCompound(page)) { in migrate_vma_collect_pmd()
2407 * By getting a reference on the page we pin it and that blocks in migrate_vma_collect_pmd()
2411 * We drop this reference after isolating the page from the lru in migrate_vma_collect_pmd()
2412 * for non-device pages (device pages are not on the LRU and thus in migrate_vma_collect_pmd()
2415 get_page(page); in migrate_vma_collect_pmd()
2419 * Optimize for the common case where page is only mapped once in migrate_vma_collect_pmd()
2420 * in one process. If we can lock the page, then we can safely in migrate_vma_collect_pmd()
2421 * set up a special migration page table entry now. in migrate_vma_collect_pmd()
2423 if (trylock_page(page)) { in migrate_vma_collect_pmd()
2429 /* Setup special migration page table entry */ in migrate_vma_collect_pmd()
2430 entry = make_migration_entry(page, mpfn & in migrate_vma_collect_pmd()
2448 * drop page refcount. Page won't be freed, as we took in migrate_vma_collect_pmd()
2451 page_remove_rmap(page, false); in migrate_vma_collect_pmd()
2452 put_page(page); in migrate_vma_collect_pmd()
2482 * This will walk the CPU page table. For each virtual address backed by a
2483 * valid page, it updates the src array and takes a reference on the page, in
2484 * order to pin the page until we lock it and unmap it.
2493 * private page mappings that won't be migrated. in migrate_vma_collect()
2508 * migrate_vma_check_page() - check if page is pinned or not
2509 * @page: struct page to check
2513 * ZONE_DEVICE page.
2515 static bool migrate_vma_check_page(struct page *page) in migrate_vma_check_page() argument
2519 * isolate_lru_page() for a regular page, or migrate_vma_collect() for in migrate_vma_check_page()
2520 * a device page. in migrate_vma_check_page()
2525 * FIXME: support THP (transparent huge pages); it is a bit more complex to in migrate_vma_check_page()
2529 if (PageCompound(page)) in migrate_vma_check_page()
2532 /* Page from ZONE_DEVICE have one extra reference */ in migrate_vma_check_page()
2533 if (is_zone_device_page(page)) { in migrate_vma_check_page()
2535 * Private pages can never be pinned as they have no valid pte and in migrate_vma_check_page()
2538 * will bump the page reference count. Sadly there is no way to in migrate_vma_check_page()
2545 * it does not need to take a reference on page. in migrate_vma_check_page()
2547 return is_device_private_page(page); in migrate_vma_check_page()
2550 /* For file-backed pages */ in migrate_vma_check_page()
2551 if (page_mapping(page)) in migrate_vma_check_page()
2552 extra += 1 + page_has_private(page); in migrate_vma_check_page()
2554 if ((page_count(page) - extra) > page_mapcount(page)) in migrate_vma_check_page()
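Roughly, the test above treats the page as pinned when page_count(page) - extra > page_mapcount(page), where extra is the one reference taken when the page was collected or isolated, plus 1 + page_has_private() for file-backed pages (the page-cache and buffer-head references). For an ordinary anonymous page, extra is 1, so any additional reference, such as an in-flight get_user_pages() pin for direct I/O, fails the check; the MIGRATE_PFN_MIGRATE flag is then cleared and the page is restored instead of migrated.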
2565 * page is locked it is isolated from the lru (for non-device pages). Finally,
2579 struct page *page = migrate_pfn_to_page(migrate->src[i]); in migrate_vma_prepare() local
2582 if (!page) in migrate_vma_prepare()
2589 * are waiting on each other's page lock. in migrate_vma_prepare()
2592 * for any page we cannot lock right away. in migrate_vma_prepare()
2594 if (!trylock_page(page)) { in migrate_vma_prepare()
2597 put_page(page); in migrate_vma_prepare()
2605 if (!is_zone_device_page(page)) { in migrate_vma_prepare()
2606 if (!PageLRU(page) && allow_drain) { in migrate_vma_prepare()
2612 if (isolate_lru_page(page)) { in migrate_vma_prepare()
2619 unlock_page(page); in migrate_vma_prepare()
2621 put_page(page); in migrate_vma_prepare()
2627 put_page(page); in migrate_vma_prepare()
2630 if (!migrate_vma_check_page(page)) { in migrate_vma_prepare()
2636 if (!is_zone_device_page(page)) { in migrate_vma_prepare()
2637 get_page(page); in migrate_vma_prepare()
2638 putback_lru_page(page); in migrate_vma_prepare()
2642 unlock_page(page); in migrate_vma_prepare()
2645 if (!is_zone_device_page(page)) in migrate_vma_prepare()
2646 putback_lru_page(page); in migrate_vma_prepare()
2648 put_page(page); in migrate_vma_prepare()
2654 struct page *page = migrate_pfn_to_page(migrate->src[i]); in migrate_vma_prepare() local
2656 if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE)) in migrate_vma_prepare()
2659 remove_migration_pte(page, migrate->vma, addr, page); in migrate_vma_prepare()
2662 unlock_page(page); in migrate_vma_prepare()
2663 put_page(page); in migrate_vma_prepare()
2669 * migrate_vma_unmap() - replace page mapping with special migration pte entry
2672 * Replace page mapping (CPU page table pte) with a special migration pte entry
2677 * destination memory and copy contents of original page over to new page.
2687 struct page *page = migrate_pfn_to_page(migrate->src[i]); in migrate_vma_unmap() local
2689 if (!page || !(migrate->src[i] & MIGRATE_PFN_MIGRATE)) in migrate_vma_unmap()
2692 if (page_mapped(page)) { in migrate_vma_unmap()
2693 try_to_unmap(page, flags); in migrate_vma_unmap()
2694 if (page_mapped(page)) in migrate_vma_unmap()
2698 if (migrate_vma_check_page(page)) in migrate_vma_unmap()
2708 struct page *page = migrate_pfn_to_page(migrate->src[i]); in migrate_vma_unmap() local
2710 if (!page || (migrate->src[i] & MIGRATE_PFN_MIGRATE)) in migrate_vma_unmap()
2713 remove_migration_ptes(page, page, false); in migrate_vma_unmap()
2716 unlock_page(page); in migrate_vma_unmap()
2719 if (is_zone_device_page(page)) in migrate_vma_unmap()
2720 put_page(page); in migrate_vma_unmap()
2722 putback_lru_page(page); in migrate_vma_unmap()
2736 * and unmapped, check whether each page is pinned or not. Pages that aren't
2745 * page and with the MIGRATE_PFN_VALID and MIGRATE_PFN_LOCKED flags set
2750 * device memory to system memory. If the caller cannot migrate a device page
2755 * For empty entries inside CPU page table (pte_none() or pmd_none() is true) we
2767 * then migrate_vma_pages() to migrate struct page information from the source
2768 * struct page to the destination struct page. If it fails to migrate the
2769 * struct page information, then it clears the MIGRATE_PFN_MIGRATE flag in the
2780 * It is safe to update device page table after migrate_vma_pages() because
2781 * both destination and source page are still locked, and the mmap_lock is held
2784 * Once the caller is done cleaning up things and updating its page table (if it
2786 * migrate_vma_finalize() to update the CPU page table to point to new pages
2787 * for successfully migrated pages or otherwise restore the CPU page table to
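A sketch of a driver following that sequence for a single base page (all demo_* names and the device-page allocation are placeholders; lib/test_hmm.c is a complete in-tree user). The caller is assumed to hold mmap_lock, as required above:

    #include <linux/migrate.h>
    #include <linux/mm.h>

    static int demo_migrate_one_page_to_device(struct vm_area_struct *vma,
                                               unsigned long addr,
                                               void *pgmap_owner)
    {
            unsigned long src_pfn = 0, dst_pfn = 0;
            struct migrate_vma args = {
                    .vma         = vma,
                    .start       = addr & PAGE_MASK,
                    .end         = (addr & PAGE_MASK) + PAGE_SIZE,
                    .src         = &src_pfn,
                    .dst         = &dst_pfn,
                    .pgmap_owner = pgmap_owner,
                    .flags       = MIGRATE_VMA_SELECT_SYSTEM,
            };
            struct page *dpage;
            int ret;

            ret = migrate_vma_setup(&args);         /* collect, lock, unmap */
            if (ret || !args.cpages)
                    return ret ? ret : -EBUSY;

            /* Placeholder: allocate, lock and fill a device destination page. */
            dpage = NULL;   /* e.g. demo_alloc_and_copy_device_page(&args) */
            if (dpage)
                    dst_pfn = migrate_pfn(page_to_pfn(dpage)) |
                              MIGRATE_PFN_LOCKED;
            else
                    dst_pfn = 0;    /* leave this page where it is */

            migrate_vma_pages(&args);       /* move struct page metadata */
            /* ...update the device's page tables for migrated entries... */
            migrate_vma_finalize(&args);    /* switch or restore the CPU ptes */
            return 0;
    }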
2835 * to map in an anonymous zero page but the struct page will be a ZONE_DEVICE
2836 * private page.
2840 struct page *page, in migrate_vma_insert_page() argument
2892 if (mem_cgroup_charge(page, vma->vm_mm, GFP_KERNEL)) in migrate_vma_insert_page()
2897 * preceding stores to the page contents become visible before in migrate_vma_insert_page()
2900 __SetPageUptodate(page); in migrate_vma_insert_page()
2902 if (is_zone_device_page(page)) { in migrate_vma_insert_page()
2903 if (is_device_private_page(page)) { in migrate_vma_insert_page()
2906 swp_entry = make_device_private_entry(page, vma->vm_flags & VM_WRITE); in migrate_vma_insert_page()
2913 pr_warn_once("Unsupported ZONE_DEVICE page type.\n"); in migrate_vma_insert_page()
2917 entry = mk_pte(page, vma->vm_page_prot); in migrate_vma_insert_page()
2944 page_add_new_anon_rmap(page, vma, addr, false); in migrate_vma_insert_page()
2945 if (!is_zone_device_page(page)) in migrate_vma_insert_page()
2946 lru_cache_add_inactive_or_unevictable(page, vma); in migrate_vma_insert_page()
2947 get_page(page); in migrate_vma_insert_page()
2971 * migrate_vma_pages() - migrate meta-data from src page to dst page
2974 * This migrates struct page meta-data from source struct page to destination
2975 * struct page. This effectively finishes the migration from source page to the
2976 * destination page.
2987 struct page *newpage = migrate_pfn_to_page(migrate->dst[i]); in migrate_vma_pages()
2988 struct page *page = migrate_pfn_to_page(migrate->src[i]); in migrate_vma_pages() local
2997 if (!page) { in migrate_vma_pages()
3016 mapping = page_mapping(page); in migrate_vma_pages()
3030 * Other types of ZONE_DEVICE page are not in migrate_vma_pages()
3038 r = migrate_page(mapping, newpage, page, MIGRATE_SYNC_NO_COPY); in migrate_vma_pages()
3054 * migrate_vma_finalize() - restore CPU page table entry
3058 * new page if migration was successful for that page, or to the original page
3070 struct page *newpage = migrate_pfn_to_page(migrate->dst[i]); in migrate_vma_finalize()
3071 struct page *page = migrate_pfn_to_page(migrate->src[i]); in migrate_vma_finalize() local
3073 if (!page) { in migrate_vma_finalize()
3086 newpage = page; in migrate_vma_finalize()
3089 remove_migration_ptes(page, newpage, false); in migrate_vma_finalize()
3090 unlock_page(page); in migrate_vma_finalize()
3092 if (is_zone_device_page(page)) in migrate_vma_finalize()
3093 put_page(page); in migrate_vma_finalize()
3095 putback_lru_page(page); in migrate_vma_finalize()
3097 if (newpage != page) { in migrate_vma_finalize()