Lines Matching full:page
46 /* How many pages do we try to swap or page in/out together? */
81 static void __page_cache_release(struct page *page) in __page_cache_release() argument
83 if (PageLRU(page)) { in __page_cache_release()
84 pg_data_t *pgdat = page_pgdat(page); in __page_cache_release()
89 lruvec = mem_cgroup_page_lruvec(page, pgdat); in __page_cache_release()
90 VM_BUG_ON_PAGE(!PageLRU(page), page); in __page_cache_release()
91 __ClearPageLRU(page); in __page_cache_release()
92 del_page_from_lru_list(page, lruvec, page_off_lru(page)); in __page_cache_release()
95 __ClearPageWaiters(page); in __page_cache_release()
98 static void __put_single_page(struct page *page) in __put_single_page() argument
100 __page_cache_release(page); in __put_single_page()
101 mem_cgroup_uncharge(page); in __put_single_page()
102 free_unref_page(page); in __put_single_page()
105 static void __put_compound_page(struct page *page) in __put_compound_page() argument
109 * hugetlb. This is because a hugetlb page never has PageLRU set in __put_compound_page()
113 if (!PageHuge(page)) in __put_compound_page()
114 __page_cache_release(page); in __put_compound_page()
115 destroy_compound_page(page); in __put_compound_page()
118 void __put_page(struct page *page) in __put_page() argument
120 if (is_zone_device_page(page)) { in __put_page()
121 put_dev_pagemap(page->pgmap); in __put_page()
124 * The page belongs to the device that created pgmap. Do in __put_page()
125 * not return it to page allocator. in __put_page()
130 if (unlikely(PageCompound(page))) in __put_page()
131 __put_compound_page(page); in __put_page()
133 __put_single_page(page); in __put_page()
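The dispatch above reads naturally as a three-way split: device-owned pages go back to their pgmap owner, compound pages take their own teardown path, everything else is freed as a single page. A minimal userspace sketch of that split; fake_page, put_page_model and the helper names are illustrative stand-ins, not kernel types:

#include <stdbool.h>
#include <stdio.h>

/* Illustrative stand-in for struct page; the fields model the checks
 * made by __put_page(), not the kernel's actual layout. */
struct fake_page {
    bool zone_device;   /* models is_zone_device_page() */
    bool compound;      /* models PageCompound() */
};

static void put_single(void)   { printf("release LRU state, free single page\n"); }
static void put_compound(void) { printf("destroy compound page\n"); }

static void put_page_model(const struct fake_page *page)
{
    if (page->zone_device) {
        /* The page belongs to the device that created its pgmap:
         * drop the pgmap reference and never hand the page back
         * to the page allocator. */
        printf("put_dev_pagemap(), page stays with the device\n");
        return;
    }
    if (page->compound)
        put_compound();
    else
        put_single();
}

int main(void)
{
    put_page_model(&(struct fake_page){ .zone_device = true });
    put_page_model(&(struct fake_page){ .compound = true });
    put_page_model(&(struct fake_page){ 0 });
    return 0;
}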
139 * @pages: list of pages threaded on page->lru
141 * Release a list of pages which are strung together on page->lru. Currently
147 struct page *victim; in put_pages_list()
166 * were pinned, returns -errno. Each page returned must be released
170 struct page **pages) in get_kernel_pages()
187 * get_kernel_page() - pin a kernel page in memory
190 * @pages: array that receives pointer to the page pinned.
193 * Returns 1 if page is pinned. If the page was not pinned, returns
194 * -errno. The page returned must be released with a put_page() call
197 int get_kernel_page(unsigned long start, int write, struct page **pages) in get_kernel_page()
209 void (*move_fn)(struct page *page, struct lruvec *lruvec, void *arg), in pagevec_lru_move_fn() argument
218 struct page *page = pvec->pages[i]; in pagevec_lru_move_fn() local
219 struct pglist_data *pagepgdat = page_pgdat(page); in pagevec_lru_move_fn()
228 lruvec = mem_cgroup_page_lruvec(page, pgdat); in pagevec_lru_move_fn()
229 (*move_fn)(page, lruvec, arg); in pagevec_lru_move_fn()
237 static void pagevec_move_tail_fn(struct page *page, struct lruvec *lruvec, in pagevec_move_tail_fn() argument
242 if (PageLRU(page) && !PageUnevictable(page)) { in pagevec_move_tail_fn()
243 del_page_from_lru_list(page, lruvec, page_lru(page)); in pagevec_move_tail_fn()
244 ClearPageActive(page); in pagevec_move_tail_fn()
245 add_page_to_lru_list_tail(page, lruvec, page_lru(page)); in pagevec_move_tail_fn()
246 (*pgmoved) += thp_nr_pages(page); in pagevec_move_tail_fn()
263 static bool pagevec_add_and_need_flush(struct pagevec *pvec, struct page *page) in pagevec_add_and_need_flush() argument
267 if (!pagevec_add(pvec, page) || PageCompound(page) || in pagevec_add_and_need_flush()
275 * Writeback is about to end against a page which has been marked for immediate
279 void rotate_reclaimable_page(struct page *page) in rotate_reclaimable_page() argument
281 if (!PageLocked(page) && !PageDirty(page) && in rotate_reclaimable_page()
282 !PageUnevictable(page) && PageLRU(page)) { in rotate_reclaimable_page()
286 get_page(page); in rotate_reclaimable_page()
289 if (pagevec_add_and_need_flush(pvec, page)) in rotate_reclaimable_page()
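rotate_reclaimable_page() shows the per-CPU pagevec idiom used throughout this file: queue pages into a small fixed-size vector and pay for the LRU lock once per batch instead of once per page. A self-contained sketch of just the fill-triggered flush in pagevec_add_and_need_flush() (the kernel also flushes for compound pages and while LRU caches are disabled); pagevec_model and queue_page are illustrative names:

#include <stdio.h>

#define PAGEVEC_SIZE 15    /* matches the kernel's batch size */

struct pagevec_model {
    unsigned int nr;
    int pages[PAGEVEC_SIZE];    /* ints stand in for struct page pointers */
};

/* Mirrors pagevec_add(): stores the page and returns the remaining
 * space, i.e. zero once the vector is full. */
static unsigned int pagevec_add_model(struct pagevec_model *pvec, int page)
{
    pvec->pages[pvec->nr++] = page;
    return PAGEVEC_SIZE - pvec->nr;
}

/* One lock round-trip handles a whole batch of pages. */
static void drain_model(struct pagevec_model *pvec)
{
    printf("drain: move %u pages under a single LRU-lock section\n", pvec->nr);
    pvec->nr = 0;
}

static void queue_page(struct pagevec_model *pvec, int page)
{
    if (!pagevec_add_model(pvec, page))
        drain_model(pvec);
}

int main(void)
{
    struct pagevec_model pvec = { 0 };

    for (int page = 1; page <= 40; page++)
        queue_page(&pvec, page);
    if (pvec.nr)    /* flush the partial batch */
        drain_model(&pvec);
    return 0;
}

The same queue-then-flush shape recurs below in activate_page(), lru_cache_add(), deactivate_file_page(), deactivate_page() and the lazyfree paths.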
326 void lru_note_cost_page(struct page *page) in lru_note_cost_page() argument
328 lru_note_cost(mem_cgroup_page_lruvec(page, page_pgdat(page)), in lru_note_cost_page()
329 page_is_file_lru(page), thp_nr_pages(page)); in lru_note_cost_page()
332 static void __activate_page(struct page *page, struct lruvec *lruvec, in __activate_page() argument
335 if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) { in __activate_page()
336 int lru = page_lru_base_type(page); in __activate_page()
337 int nr_pages = thp_nr_pages(page); in __activate_page()
339 del_page_from_lru_list(page, lruvec, lru); in __activate_page()
340 SetPageActive(page); in __activate_page()
342 add_page_to_lru_list(page, lruvec, lru); in __activate_page()
343 trace_mm_lru_activate(page); in __activate_page()
365 static void activate_page(struct page *page) in activate_page() argument
367 page = compound_head(page); in activate_page()
368 if (PageLRU(page) && !PageActive(page) && !PageUnevictable(page)) { in activate_page()
373 get_page(page); in activate_page()
374 if (pagevec_add_and_need_flush(pvec, page)) in activate_page()
385 static void activate_page(struct page *page) in activate_page() argument
387 pg_data_t *pgdat = page_pgdat(page); in activate_page()
389 page = compound_head(page); in activate_page()
391 __activate_page(page, mem_cgroup_page_lruvec(page, pgdat), NULL); in activate_page()
396 static void __lru_cache_activate_page(struct page *page) in __lru_cache_activate_page() argument
405 * Search backwards on the optimistic assumption that the page being in __lru_cache_activate_page()
407 * the local pagevec is examined as a !PageLRU page could be in the in __lru_cache_activate_page()
410 * a remote pagevec's page PageActive potentially hits a race where in __lru_cache_activate_page()
411 * a page is marked PageActive just after it is added to the inactive in __lru_cache_activate_page()
415 struct page *pagevec_page = pvec->pages[i]; in __lru_cache_activate_page()
417 if (pagevec_page == page) { in __lru_cache_activate_page()
418 SetPageActive(page); in __lru_cache_activate_page()
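A standalone sketch of that backwards scan, under the assumption stated in the comment that only the local CPU's vector may be touched; the types mirror the pagevec_model sketch above and are not kernel types:

#include <stdbool.h>
#include <stdio.h>

#define PAGEVEC_SIZE 15

struct pagevec_model {
    unsigned int nr;
    int pages[PAGEVEC_SIZE];
};

/* Models __lru_cache_activate_page(): a page being activated was most
 * likely queued a moment ago, so it sits near the end of the local
 * vector; scan from the tail and stop at the first match. */
static bool activate_in_local_pagevec(const struct pagevec_model *pvec, int page)
{
    for (int i = (int)pvec->nr - 1; i >= 0; i--) {
        if (pvec->pages[i] == page)
            return true;    /* would SetPageActive() here */
    }
    return false;    /* not local: touching a remote vector would race */
}

int main(void)
{
    struct pagevec_model pvec = { .nr = 3, .pages = { 7, 8, 9 } };

    printf("page 9: %s\n", activate_in_local_pagevec(&pvec, 9) ? "found" : "missed");
    printf("page 4: %s\n", activate_in_local_pagevec(&pvec, 4) ? "found" : "missed");
    return 0;
}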
427 * Mark a page as having seen activity.
433 * When a newly allocated page is not yet visible, and thus safe for non-atomic ops,
434 * __SetPageReferenced(page) may be substituted for mark_page_accessed(page).
436 void mark_page_accessed(struct page *page) in mark_page_accessed() argument
438 page = compound_head(page); in mark_page_accessed()
440 trace_android_vh_mark_page_accessed(page); in mark_page_accessed()
441 if (!PageReferenced(page)) { in mark_page_accessed()
442 SetPageReferenced(page); in mark_page_accessed()
443 } else if (PageUnevictable(page)) { in mark_page_accessed()
447 * evictable page accessed has no effect. in mark_page_accessed()
449 } else if (!PageActive(page)) { in mark_page_accessed()
451 * If the page is on the LRU, queue it for activation via in mark_page_accessed()
452 * lru_pvecs.activate_page. Otherwise, assume the page is on a in mark_page_accessed()
456 if (PageLRU(page)) in mark_page_accessed()
457 activate_page(page); in mark_page_accessed()
459 __lru_cache_activate_page(page); in mark_page_accessed()
460 ClearPageReferenced(page); in mark_page_accessed()
461 workingset_activation(page); in mark_page_accessed()
463 if (page_is_idle(page)) in mark_page_accessed()
464 clear_page_idle(page); in mark_page_accessed()
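The ladder in mark_page_accessed() implements two-touch activation: the first access only sets the referenced bit, and a second access while the bit is still set promotes the page to the active list. A minimal model, with fake_page fields standing in for the real page flag bits:

#include <stdbool.h>
#include <stdio.h>

struct fake_page {
    bool referenced;    /* models PageReferenced */
    bool active;        /* models PageActive */
    bool unevictable;   /* models PageUnevictable */
};

static void mark_accessed_model(struct fake_page *page)
{
    if (!page->referenced) {
        page->referenced = true;    /* first touch */
    } else if (page->unevictable) {
        /* the unevictable list is never rotated; nothing to do */
    } else if (!page->active) {
        page->active = true;        /* second touch: activate */
        page->referenced = false;   /* restart the two-touch clock */
    }
}

int main(void)
{
    struct fake_page page = { 0 };

    mark_accessed_model(&page);    /* touch 1: referenced */
    mark_accessed_model(&page);    /* touch 2: activated */
    printf("active=%d referenced=%d\n", page.active, page.referenced);
    return 0;
}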
469 * lru_cache_add - add a page to the LRU lists
470 * @page: the page to be added to the LRU.
472 * Queue the page for addition to the LRU via pagevec. The decision on whether
473 * to add the page to the [in]active [file|anon] list is deferred until the
475 * have the page added to the active list using mark_page_accessed().
477 void lru_cache_add(struct page *page) in lru_cache_add() argument
481 VM_BUG_ON_PAGE(PageActive(page) && PageUnevictable(page), page); in lru_cache_add()
482 VM_BUG_ON_PAGE(PageLRU(page), page); in lru_cache_add()
484 get_page(page); in lru_cache_add()
487 if (pagevec_add_and_need_flush(pvec, page)) in lru_cache_add()
495 * @page: the page to be added to LRU
496 * @vma: vma in which page is mapped for determining reclaimability
498 * Place @page on the inactive or unevictable LRU list, depending on its
501 void __lru_cache_add_inactive_or_unevictable(struct page *page, in __lru_cache_add_inactive_or_unevictable() argument
506 VM_BUG_ON_PAGE(PageLRU(page), page); in __lru_cache_add_inactive_or_unevictable()
509 if (unlikely(unevictable) && !TestSetPageMlocked(page)) { in __lru_cache_add_inactive_or_unevictable()
510 int nr_pages = thp_nr_pages(page); in __lru_cache_add_inactive_or_unevictable()
516 __mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages); in __lru_cache_add_inactive_or_unevictable()
519 lru_cache_add(page); in __lru_cache_add_inactive_or_unevictable()
523 * If the page cannot be invalidated, it is moved to the
527 * effective than the single-page writeout from reclaim.
529 * If the page isn't page_mapped and dirty/writeback, the page
532 * 1. active, mapped page -> none
533 * 2. active, dirty/writeback page -> inactive, head, PG_reclaim
534 * 3. inactive, mapped page -> none
535 * 4. inactive, dirty/writeback page -> inactive, head, PG_reclaim
539 * In case 4, the page is moved to the inactive list's head because the VM expects the page would
541 * than the single-page writeout from reclaim.
543 static void lru_deactivate_file_fn(struct page *page, struct lruvec *lruvec, in lru_deactivate_file_fn() argument
548 int nr_pages = thp_nr_pages(page); in lru_deactivate_file_fn()
550 if (!PageLRU(page)) in lru_deactivate_file_fn()
553 if (PageUnevictable(page)) in lru_deactivate_file_fn()
556 /* Some processes are using the page */ in lru_deactivate_file_fn()
557 if (page_mapped(page)) in lru_deactivate_file_fn()
560 active = PageActive(page); in lru_deactivate_file_fn()
561 lru = page_lru_base_type(page); in lru_deactivate_file_fn()
563 del_page_from_lru_list(page, lruvec, lru + active); in lru_deactivate_file_fn()
564 ClearPageActive(page); in lru_deactivate_file_fn()
565 ClearPageReferenced(page); in lru_deactivate_file_fn()
567 if (PageWriteback(page) || PageDirty(page)) { in lru_deactivate_file_fn()
573 add_page_to_lru_list(page, lruvec, lru); in lru_deactivate_file_fn()
574 SetPageReclaim(page); in lru_deactivate_file_fn()
577 * The page's writeback ended while it sat in the pagevec. in lru_deactivate_file_fn()
578 * We move the page to the tail of the inactive list. in lru_deactivate_file_fn()
580 add_page_to_lru_list_tail(page, lruvec, lru); in lru_deactivate_file_fn()
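The four transitions listed before lru_deactivate_file_fn() collapse into a small decision table: mapped pages are left alone, dirty or writeback pages go to the head of the inactive list with PG_reclaim set (writeback completion then rotates them to the tail), and clean pages go straight to the tail, next in line for reclaim. A sketch, with deactivate_file_model as an illustrative name:

#include <stdbool.h>
#include <stdio.h>

static const char *deactivate_file_model(bool on_lru, bool unevictable,
                                         bool mapped, bool dirty_or_writeback)
{
    if (!on_lru || unevictable)
        return "skip";
    if (mapped)
        return "leave in place";                /* cases 1 and 3 */
    if (dirty_or_writeback)
        return "inactive head + PG_reclaim";    /* cases 2 and 4 */
    return "inactive tail";                     /* clean: reclaim next */
}

int main(void)
{
    printf("mapped: %s\n", deactivate_file_model(true, false, true, false));
    printf("dirty:  %s\n", deactivate_file_model(true, false, false, true));
    printf("clean:  %s\n", deactivate_file_model(true, false, false, false));
    return 0;
}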
591 static void lru_deactivate_fn(struct page *page, struct lruvec *lruvec, in lru_deactivate_fn() argument
594 if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) { in lru_deactivate_fn()
595 int lru = page_lru_base_type(page); in lru_deactivate_fn()
596 int nr_pages = thp_nr_pages(page); in lru_deactivate_fn()
598 del_page_from_lru_list(page, lruvec, lru + LRU_ACTIVE); in lru_deactivate_fn()
599 ClearPageActive(page); in lru_deactivate_fn()
600 ClearPageReferenced(page); in lru_deactivate_fn()
601 add_page_to_lru_list(page, lruvec, lru); in lru_deactivate_fn()
609 static void lru_lazyfree_fn(struct page *page, struct lruvec *lruvec, in lru_lazyfree_fn() argument
612 if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) && in lru_lazyfree_fn()
613 !PageSwapCache(page) && !PageUnevictable(page)) { in lru_lazyfree_fn()
614 bool active = PageActive(page); in lru_lazyfree_fn()
615 int nr_pages = thp_nr_pages(page); in lru_lazyfree_fn()
617 del_page_from_lru_list(page, lruvec, in lru_lazyfree_fn()
619 ClearPageActive(page); in lru_lazyfree_fn()
620 ClearPageReferenced(page); in lru_lazyfree_fn()
626 ClearPageSwapBacked(page); in lru_lazyfree_fn()
627 add_page_to_lru_list(page, lruvec, LRU_INACTIVE_FILE); in lru_lazyfree_fn()
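A sketch of the lazyfree transformation in lru_lazyfree_fn(), assuming simplified stand-in fields: the key step is clearing the swap-backed state, so a clean anonymous page is requeued on the inactive file list and reclaimed like a clean file page, without any swap I/O:

#include <stdbool.h>
#include <stdio.h>

struct fake_page {
    bool anon, swap_backed, in_swap_cache, unevictable;
    bool active, referenced;
    const char *lru;
};

static void lazyfree_model(struct fake_page *page)
{
    if (page->anon && page->swap_backed &&
        !page->in_swap_cache && !page->unevictable) {
        page->active = false;
        page->referenced = false;
        page->swap_backed = false;    /* key step: treat as file-backed */
        page->lru = "inactive_file";
    }
}

int main(void)
{
    struct fake_page page = {
        .anon = true, .swap_backed = true,
        .active = true, .lru = "active_anon",
    };

    lazyfree_model(&page);
    printf("lru=%s swap_backed=%d\n", page.lru, page.swap_backed);
    return 0;
}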
635 static void lru_lazyfree_movetail_fn(struct page *page, struct lruvec *lruvec, in lru_lazyfree_movetail_fn() argument
640 if (PageLRU(page) && !PageUnevictable(page) && PageSwapBacked(page) && in lru_lazyfree_movetail_fn()
641 !PageSwapCache(page)) { in lru_lazyfree_movetail_fn()
642 bool active = PageActive(page); in lru_lazyfree_movetail_fn()
644 del_page_from_lru_list(page, lruvec, in lru_lazyfree_movetail_fn()
646 ClearPageActive(page); in lru_lazyfree_movetail_fn()
647 ClearPageReferenced(page); in lru_lazyfree_movetail_fn()
649 add_page_to_lru_list_tail(page, lruvec, LRU_INACTIVE_FILE); in lru_lazyfree_movetail_fn()
651 add_page_to_lru_list(page, lruvec, LRU_INACTIVE_FILE); in lru_lazyfree_movetail_fn()
698 * deactivate_file_page - forcefully deactivate a file page
699 * @page: page to deactivate
701 * This function hints the VM that @page is a good reclaim candidate,
702 * for example if its invalidation fails due to the page being dirty
705 void deactivate_file_page(struct page *page) in deactivate_file_page() argument
708 * In a workload with many unevictable pages, such as one using mprotect, in deactivate_file_page()
709 * deactivating unevictable pages to accelerate reclaim is pointless. in deactivate_file_page()
711 if (PageUnevictable(page)) in deactivate_file_page()
714 if (likely(get_page_unless_zero(page))) { in deactivate_file_page()
720 if (pagevec_add_and_need_flush(pvec, page)) in deactivate_file_page()
727 * deactivate_page - deactivate a page
728 * @page: page to deactivate
730 * deactivate_page() moves @page to the inactive list if @page was on the active
731 * list and was not an unevictable page. This is done to accelerate the reclaim
732 * of @page.
734 void deactivate_page(struct page *page) in deactivate_page() argument
736 if (PageLRU(page) && PageActive(page) && !PageUnevictable(page)) { in deactivate_page()
741 get_page(page); in deactivate_page()
742 if (pagevec_add_and_need_flush(pvec, page)) in deactivate_page()
749 * mark_page_lazyfree - make an anon page lazyfree
750 * @page: page to deactivate
752 * mark_page_lazyfree() moves @page to the inactive file list.
753 * This is done to accelerate the reclaim of @page.
755 void mark_page_lazyfree(struct page *page) in mark_page_lazyfree() argument
757 if (PageLRU(page) && PageAnon(page) && PageSwapBacked(page) && in mark_page_lazyfree()
758 !PageSwapCache(page) && !PageUnevictable(page)) { in mark_page_lazyfree()
763 get_page(page); in mark_page_lazyfree()
764 if (pagevec_add_and_need_flush(pvec, page)) in mark_page_lazyfree()
771 * mark_page_lazyfree_movetail - make a swapbacked page lazyfree
772 * @page: page to deactivate
774 * mark_page_lazyfree_movetail() moves @page to the tail of the inactive file list.
775 * This is done to accelerate the reclaim of @page.
777 void mark_page_lazyfree_movetail(struct page *page, bool tail) in mark_page_lazyfree_movetail() argument
779 if (PageLRU(page) && !PageUnevictable(page) && PageSwapBacked(page) && in mark_page_lazyfree_movetail()
780 !PageSwapCache(page)) { in mark_page_lazyfree_movetail()
785 get_page(page); in mark_page_lazyfree_movetail()
786 if (pagevec_add_and_need_flush(pvec, page)) in mark_page_lazyfree_movetail()
894 * below which drains the page vectors. in __lru_add_drain_all()
998 * fell to zero, remove the page from the LRU and free it.
1000 void release_pages(struct page **pages, int nr) in release_pages()
1010 struct page *page = pages[i]; in release_pages() local
1022 page = compound_head(page); in release_pages()
1023 if (is_huge_zero_page(page)) in release_pages()
1026 if (is_zone_device_page(page)) { in release_pages()
1038 if (page_is_devmap_managed(page)) { in release_pages()
1039 put_devmap_managed_page(page); in release_pages()
1044 if (!put_page_testzero(page)) in release_pages()
1047 if (PageCompound(page)) { in release_pages()
1052 __put_compound_page(page); in release_pages()
1056 if (PageLRU(page)) { in release_pages()
1057 struct pglist_data *pgdat = page_pgdat(page); in release_pages()
1068 lruvec = mem_cgroup_page_lruvec(page, locked_pgdat); in release_pages()
1069 VM_BUG_ON_PAGE(!PageLRU(page), page); in release_pages()
1070 __ClearPageLRU(page); in release_pages()
1071 del_page_from_lru_list(page, lruvec, page_off_lru(page)); in release_pages()
1074 __ClearPageWaiters(page); in release_pages()
1076 list_add(&page->lru, &pages_to_free); in release_pages()
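release_pages() amortizes the per-node LRU lock across runs of consecutive pages from the same node, re-taking it only when the node changes. A sketch of just that locking pattern (the kernel additionally drops the lock after a batch limit so it is never held too long); fake_page here carries only the owning node:

#include <stdio.h>

struct fake_page { int node; };

static void release_pages_model(const struct fake_page *pages, int nr)
{
    int locked_node = -1;

    for (int i = 0; i < nr; i++) {
        if (pages[i].node != locked_node) {
            if (locked_node >= 0)
                printf("unlock node %d\n", locked_node);
            locked_node = pages[i].node;
            printf("lock node %d\n", locked_node);
        }
        printf("  remove page %d from LRU\n", i);
    }
    if (locked_node >= 0)
        printf("unlock node %d\n", locked_node);
}

int main(void)
{
    struct fake_page pages[] = {
        { 0 }, { 0 }, { 0 }, { 1 }, { 1 }, { 0 },
    };

    release_pages_model(pages, 6);    /* three lock sections, not six */
    return 0;
}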
1090 * cache-warm and we want to give them back to the page allocator ASAP.
1109 void lru_add_page_tail(struct page *page, struct page *page_tail, in lru_add_page_tail() argument
1112 VM_BUG_ON_PAGE(!PageHead(page), page); in lru_add_page_tail()
1113 VM_BUG_ON_PAGE(PageCompound(page_tail), page); in lru_add_page_tail()
1114 VM_BUG_ON_PAGE(PageLRU(page_tail), page); in lru_add_page_tail()
1120 if (likely(PageLRU(page))) in lru_add_page_tail()
1121 list_add_tail(&page_tail->lru, &page->lru); in lru_add_page_tail()
1123 /* page reclaim is reclaiming a huge page */ in lru_add_page_tail()
1128 * Head page has not yet been counted, as an hpage, in lru_add_page_tail()
1140 static void __pagevec_lru_add_fn(struct page *page, struct lruvec *lruvec, in __pagevec_lru_add_fn() argument
1144 int was_unevictable = TestClearPageUnevictable(page); in __pagevec_lru_add_fn()
1145 int nr_pages = thp_nr_pages(page); in __pagevec_lru_add_fn()
1147 VM_BUG_ON_PAGE(PageLRU(page), page); in __pagevec_lru_add_fn()
1150 * Page becomes evictable in two ways: in __pagevec_lru_add_fn()
1152 * 2) Before acquiring LRU lock to put the page to correct LRU and then in __pagevec_lru_add_fn()
1169 * check will put the page in correct LRU. Without smp_mb(), SetPageLRU in __pagevec_lru_add_fn()
1171 * the isolation of the page whose Mlocked bit is cleared (#0 is also in __pagevec_lru_add_fn()
1172 * looking at the same page) and the evictable page will be stranded in __pagevec_lru_add_fn()
1175 SetPageLRU(page); in __pagevec_lru_add_fn()
1178 if (page_evictable(page)) { in __pagevec_lru_add_fn()
1179 lru = page_lru(page); in __pagevec_lru_add_fn()
1184 ClearPageActive(page); in __pagevec_lru_add_fn()
1185 SetPageUnevictable(page); in __pagevec_lru_add_fn()
1190 add_page_to_lru_list(page, lruvec, lru); in __pagevec_lru_add_fn()
1191 trace_mm_lru_insertion(page, lru); in __pagevec_lru_add_fn()
1220 * Only one subpage of a Transparent Huge Page is returned in one call:
1244 * passed on to page-only pagevec operations.
1251 struct page *page = pvec->pages[i]; in pagevec_remove_exceptionals() local
1252 if (!xa_is_value(page)) in pagevec_remove_exceptionals()
1253 pvec->pages[j++] = page; in pagevec_remove_exceptionals()
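A sketch of the in-place compaction done by pagevec_remove_exceptionals(); is_value_entry models xa_is_value(), relying on the xarray convention that value entries have the low pointer bit set, which a real page pointer never does:

#include <stdint.h>
#include <stdio.h>

static int is_value_entry(void *entry)
{
    return (uintptr_t)entry & 1;    /* models xa_is_value() */
}

/* Compact the array in place, keeping only real page pointers, and
 * return the new count; the read and write indexes never cross. */
static int remove_exceptionals_model(void **slots, int nr)
{
    int j = 0;

    for (int i = 0; i < nr; i++) {
        if (!is_value_entry(slots[i]))
            slots[j++] = slots[i];
    }
    return j;
}

int main(void)
{
    int a, b;    /* stand-ins for real pages: aligned, low bit clear */
    void *slots[] = { &a, (void *)0x3, &b, (void *)0x5 };

    printf("%d real pages kept\n", remove_exceptionals_model(slots, 4));
    return 0;
}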
1262 * @start: The starting page index
1263 * @end: The final page index
1272 * also update @start to index the next page for the traversal.
1325 void put_devmap_managed_page(struct page *page) in put_devmap_managed_page() argument
1329 if (WARN_ON_ONCE(!page_is_devmap_managed(page))) in put_devmap_managed_page()
1332 count = page_ref_dec_return(page); in put_devmap_managed_page()
1335 * devmap page refcounts are 1-based, rather than 0-based: if in put_devmap_managed_page()
1336 * refcount is 1, then the page is free and the refcount is in put_devmap_managed_page()
1337 * stable because nobody holds a reference on the page. in put_devmap_managed_page()
1340 free_devmap_managed_page(page); in put_devmap_managed_page()
1342 __put_page(page); in put_devmap_managed_page()
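A sketch of the 1-based refcount rule spelled out in the comment above: for a devmap page a count of 1 means "free", so dropping to 1 notifies the owning driver that the page is idle, while 0 is the true final put; the function name is illustrative:

#include <stdio.h>

static void put_devmap_model(int *refcount)
{
    int count = --(*refcount);

    if (count == 1)
        printf("count==1: page is idle, notify the owning driver\n");
    else if (count == 0)
        printf("count==0: final put, destroy the page\n");
}

int main(void)
{
    int refcount = 3;    /* two users plus the base reference */

    put_devmap_model(&refcount);    /* 3 -> 2: still in use */
    put_devmap_model(&refcount);    /* 2 -> 1: driver reclaims it */
    return 0;
}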