Lines Matching full:page in mm/mlock.c
45 * An mlocked page [PageMlocked(page)] is unevictable. As such, it will
52 * may have mlocked a page that is being munlocked. So lazy mlock must take
60 void clear_page_mlock(struct page *page) in clear_page_mlock() argument
64 if (!TestClearPageMlocked(page)) in clear_page_mlock()
67 nr_pages = thp_nr_pages(page); in clear_page_mlock()
68 mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages); in clear_page_mlock()
76 if (!isolate_lru_page(page)) { in clear_page_mlock()
77 putback_lru_page(page); in clear_page_mlock()
80 * We lost the race. The page already moved to the evictable list. in clear_page_mlock()
82 if (PageUnevictable(page)) in clear_page_mlock()
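The hits above (lines 60-82) are fragments of clear_page_mlock(). As a reading aid, here is a sketch of how they fit together, reconstructed on the assumption that this is the v5.10-era mm/mlock.c; the count_vm_events() calls and the comment wording are not among the hits and are filled in from memory.

void clear_page_mlock(struct page *page)
{
        int nr_pages;

        if (!TestClearPageMlocked(page))
                return;

        nr_pages = thp_nr_pages(page);
        mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);
        count_vm_events(UNEVICTABLE_PGCLEARED, nr_pages);       /* assumed: not among the hits */
        if (!isolate_lru_page(page)) {
                putback_lru_page(page);
        } else {
                /* We lost the race: the page already moved to the evictable list. */
                if (PageUnevictable(page))
                        count_vm_events(UNEVICTABLE_PGSTRANDED, nr_pages);      /* assumed */
        }
}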
88 * Mark page as mlocked if not already.
89 * If page on LRU, isolate and putback to move to unevictable list.
91 void mlock_vma_page(struct page *page) in mlock_vma_page() argument
93 /* Serialize with page migration */ in mlock_vma_page()
94 BUG_ON(!PageLocked(page)); in mlock_vma_page()
96 VM_BUG_ON_PAGE(PageTail(page), page); in mlock_vma_page()
97 VM_BUG_ON_PAGE(PageCompound(page) && PageDoubleMap(page), page); in mlock_vma_page()
99 if (!TestSetPageMlocked(page)) { in mlock_vma_page()
100 int nr_pages = thp_nr_pages(page); in mlock_vma_page()
102 mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages); in mlock_vma_page()
104 if (!isolate_lru_page(page)) in mlock_vma_page()
105 putback_lru_page(page); in mlock_vma_page()
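For mlock_vma_page() (lines 91-105) the hits already cover nearly the whole body. A sketch of the full function follows; only the UNEVICTABLE_PGMLOCKED counter line is assumed, since it is not among the matched lines.

void mlock_vma_page(struct page *page)
{
        /* Serialize with page migration */
        BUG_ON(!PageLocked(page));

        VM_BUG_ON_PAGE(PageTail(page), page);
        VM_BUG_ON_PAGE(PageCompound(page) && PageDoubleMap(page), page);

        if (!TestSetPageMlocked(page)) {
                int nr_pages = thp_nr_pages(page);

                mod_zone_page_state(page_zone(page), NR_MLOCK, nr_pages);
                count_vm_events(UNEVICTABLE_PGMLOCKED, nr_pages);       /* assumed */
                if (!isolate_lru_page(page))
                        putback_lru_page(page);
        }
}

If isolate_lru_page() fails here, the page simply stays where it is; vmscan will later notice PageMlocked and cull it to the unevictable list, which is the lazy mlock behaviour the comment at line 52 refers to.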
110 * Isolate a page from LRU with optional get_page() pin.
111 * Assumes lru_lock already held and page already pinned.
113 static bool __munlock_isolate_lru_page(struct page *page, bool getpage) in __munlock_isolate_lru_page() argument
115 if (PageLRU(page)) { in __munlock_isolate_lru_page()
118 lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page)); in __munlock_isolate_lru_page()
120 get_page(page); in __munlock_isolate_lru_page()
121 ClearPageLRU(page); in __munlock_isolate_lru_page()
122 del_page_from_lru_list(page, lruvec, page_lru(page)); in __munlock_isolate_lru_page()
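A sketch of __munlock_isolate_lru_page() (lines 113-122). Only the "if (getpage)" guard and the return statements are assumed; everything else appears verbatim in the hits.

static bool __munlock_isolate_lru_page(struct page *page, bool getpage)
{
        if (PageLRU(page)) {
                struct lruvec *lruvec;

                lruvec = mem_cgroup_page_lruvec(page, page_pgdat(page));
                if (getpage)    /* the optional get_page() pin from the comment above */
                        get_page(page);
                ClearPageLRU(page);
                del_page_from_lru_list(page, lruvec, page_lru(page));
                return true;
        }

        return false;
}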
130 * Finish munlock after successful page isolation
132 * Page must be locked. This is a wrapper for try_to_munlock()
135 static void __munlock_isolated_page(struct page *page) in __munlock_isolated_page() argument
138 * Optimization: if the page was mapped just once, that's our mapping in __munlock_isolated_page()
141 if (page_mapcount(page) > 1) in __munlock_isolated_page()
142 try_to_munlock(page); in __munlock_isolated_page()
145 if (!PageMlocked(page)) in __munlock_isolated_page()
146 count_vm_events(UNEVICTABLE_PGMUNLOCKED, thp_nr_pages(page)); in __munlock_isolated_page()
148 putback_lru_page(page); in __munlock_isolated_page()
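__munlock_isolated_page() (lines 135-148) reads as follows when the gaps between the hits are filled in; the middle comment is paraphrased rather than quoted.

static void __munlock_isolated_page(struct page *page)
{
        /*
         * Optimization: if the page was mapped just once, that's our mapping
         * and we don't need to check all the other vmas.
         */
        if (page_mapcount(page) > 1)
                try_to_munlock(page);

        /* Did try_to_munlock() succeed, or did it punt to vmscan? */
        if (!PageMlocked(page))
                count_vm_events(UNEVICTABLE_PGMUNLOCKED, thp_nr_pages(page));

        putback_lru_page(page);
}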
152 * Accounting for page isolation fail during munlock
154 * Performs accounting when page isolation fails in munlock. There is nothing
155 * else to do because it means some other task has already removed the page
156 * from the LRU. putback_lru_page() will take care of removing the page from
158 * the page back to the unevictable list if some other vma has it mlocked.
160 static void __munlock_isolation_failed(struct page *page) in __munlock_isolation_failed() argument
162 int nr_pages = thp_nr_pages(page); in __munlock_isolation_failed()
164 if (PageUnevictable(page)) in __munlock_isolation_failed()
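The listing skips line 157, which makes the docstring above read oddly ("removing the page from / the page back to..."). Below is the docstring with that line restored from memory, plus a sketch of __munlock_isolation_failed() (lines 160-164); the two vm-event counter calls are assumptions.

/*
 * Accounting for page isolation fail during munlock
 *
 * Performs accounting when page isolation fails in munlock. There is nothing
 * else to do because it means some other task has already removed the page
 * from the LRU. putback_lru_page() will take care of removing the page from
 * the unevictable list, if necessary. vmscan [page_referenced()] will move
 * the page back to the unevictable list if some other vma has it mlocked.
 */
static void __munlock_isolation_failed(struct page *page)
{
        int nr_pages = thp_nr_pages(page);

        if (PageUnevictable(page))
                __count_vm_events(UNEVICTABLE_PGSTRANDED, nr_pages);    /* assumed */
        else
                __count_vm_events(UNEVICTABLE_PGMUNLOCKED, nr_pages);   /* assumed */
}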
171 * munlock_vma_page - munlock a vma page
172 * @page: page to be unlocked, either a normal page or THP page head
174 * returns the size of the page as a page mask (0 for normal page,
175 * HPAGE_PMD_NR - 1 for THP head page)
177 * called from munlock()/munmap() path with page supposedly on the LRU.
178 * When we munlock a page, because the vma where we found the page is being
180 * page locked so that we can leave it on the unevictable lru list and not
181 * bother vmscan with it. However, to walk the page's rmap list in
182 * try_to_munlock() we must isolate the page from the LRU. If some other
183 * task has removed the page from the LRU, we won't be able to do that.
185 * can't isolate the page, we leave it for putback_lru_page() and vmscan
188 unsigned int munlock_vma_page(struct page *page) in munlock_vma_page() argument
191 pg_data_t *pgdat = page_pgdat(page); in munlock_vma_page()
193 /* For try_to_munlock() and to serialize with page migration */ in munlock_vma_page()
194 BUG_ON(!PageLocked(page)); in munlock_vma_page()
196 VM_BUG_ON_PAGE(PageTail(page), page); in munlock_vma_page()
201 * we clear it in the head page. It also stabilizes thp_nr_pages(). in munlock_vma_page()
205 if (!TestClearPageMlocked(page)) { in munlock_vma_page()
211 nr_pages = thp_nr_pages(page); in munlock_vma_page()
212 __mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages); in munlock_vma_page()
214 if (__munlock_isolate_lru_page(page, true)) { in munlock_vma_page()
216 __munlock_isolated_page(page); in munlock_vma_page()
219 __munlock_isolation_failed(page); in munlock_vma_page()
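munlock_vma_page() (lines 188-219) is where the per-node LRU lock matters: it serializes against THP splitting so that thp_nr_pages() stays stable while PageMlocked is cleared in the head page. A control-flow sketch follows; the lru_lock calls, the early-out bookkeeping, and the final return value are filled in from the v5.10-era code and should be treated as assumptions.

unsigned int munlock_vma_page(struct page *page)
{
        int nr_pages;
        pg_data_t *pgdat = page_pgdat(page);

        /* For try_to_munlock() and to serialize with page migration */
        BUG_ON(!PageLocked(page));

        VM_BUG_ON_PAGE(PageTail(page), page);

        /*
         * Serialize against THP splitting, which would otherwise copy
         * PageMlocked to tail pages before we clear it in the head page.
         * It also stabilizes thp_nr_pages().
         */
        spin_lock_irq(&pgdat->lru_lock);        /* assumed: per-node LRU lock in this tree */

        if (!TestClearPageMlocked(page)) {
                /* Potentially a PTE-mapped THP: do not skip the remaining PTEs */
                nr_pages = 1;
                goto unlock_out;
        }

        nr_pages = thp_nr_pages(page);
        __mod_zone_page_state(page_zone(page), NR_MLOCK, -nr_pages);

        if (__munlock_isolate_lru_page(page, true)) {
                spin_unlock_irq(&pgdat->lru_lock);
                __munlock_isolated_page(page);
                goto out;
        }
        __munlock_isolation_failed(page);

unlock_out:
        spin_unlock_irq(&pgdat->lru_lock);

out:
        /* Page mask: 0 for a normal page, HPAGE_PMD_NR - 1 for a THP head. */
        return nr_pages - 1;
}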
241 * Prepare page for fast batched LRU putback via putback_lru_evictable_pagevec()
247 * avoid leaving evictable page in unevictable list.
249 * In case of success, @page is added to @pvec and @pgrescued is incremented
250 * in case that the page was previously unevictable. @page is also unlocked.
252 static bool __putback_lru_fast_prepare(struct page *page, struct pagevec *pvec, in __putback_lru_fast_prepare() argument
255 VM_BUG_ON_PAGE(PageLRU(page), page); in __putback_lru_fast_prepare()
256 VM_BUG_ON_PAGE(!PageLocked(page), page); in __putback_lru_fast_prepare()
258 if (page_mapcount(page) <= 1 && page_evictable(page)) { in __putback_lru_fast_prepare()
259 pagevec_add(pvec, page); in __putback_lru_fast_prepare()
260 if (TestClearPageUnevictable(page)) in __putback_lru_fast_prepare()
262 unlock_page(page); in __putback_lru_fast_prepare()
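__putback_lru_fast_prepare() (lines 252-262) can be reconstructed almost entirely from the hits; only the two return statements are assumed.

static bool __putback_lru_fast_prepare(struct page *page, struct pagevec *pvec,
                int *pgrescued)
{
        VM_BUG_ON_PAGE(PageLRU(page), page);
        VM_BUG_ON_PAGE(!PageLocked(page), page);

        if (page_mapcount(page) <= 1 && page_evictable(page)) {
                pagevec_add(pvec, page);
                if (TestClearPageUnevictable(page))
                        (*pgrescued)++;
                unlock_page(page);
                return true;    /* fast path: caller putbacks the whole pagevec at once */
        }

        return false;           /* slow path: caller falls back to __munlock_isolated_page() */
}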
306 /* Phase 1: page isolation */ in __munlock_pagevec()
309 struct page *page = pvec->pages[i]; in __munlock_pagevec() local
311 if (TestClearPageMlocked(page)) { in __munlock_pagevec()
316 if (__munlock_isolate_lru_page(page, false)) in __munlock_pagevec()
319 __munlock_isolation_failed(page); in __munlock_pagevec()
325 * We won't be munlocking this page in the next phase in __munlock_pagevec()
339 /* Phase 2: page munlock */ in __munlock_pagevec()
341 struct page *page = pvec->pages[i]; in __munlock_pagevec() local
343 if (page) { in __munlock_pagevec()
344 lock_page(page); in __munlock_pagevec()
345 if (!__putback_lru_fast_prepare(page, &pvec_putback, in __munlock_pagevec()
351 get_page(page); /* for putback_lru_page() */ in __munlock_pagevec()
352 __munlock_isolated_page(page); in __munlock_pagevec()
353 unlock_page(page); in __munlock_pagevec()
354 put_page(page); /* from follow_page_mask() */ in __munlock_pagevec()
360 * Phase 3: page putback for pages that qualified for the fast path in __munlock_pagevec()
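The hits from __munlock_pagevec() (lines 306-360) show its three phases. Below is a structural sketch of how they connect. The LRU-lock calls, the delta_munlocked batching of NR_MLOCK, and the name of the phase-3 helper are assumptions; the per-page logic follows the matched lines.

static void __munlock_pagevec(struct pagevec *pvec, struct zone *zone)
{
        int i;
        int nr = pagevec_count(pvec);
        int delta_munlocked = -nr;              /* assumed bookkeeping */
        struct pagevec pvec_putback;
        int pgrescued = 0;

        pagevec_init(&pvec_putback);

        /* Phase 1: page isolation */
        spin_lock_irq(&zone->zone_pgdat->lru_lock);     /* assumed: per-node LRU lock */
        for (i = 0; i < nr; i++) {
                struct page *page = pvec->pages[i];

                if (TestClearPageMlocked(page)) {
                        /* follow_page_mask() already pinned it, so no get_page() here */
                        if (__munlock_isolate_lru_page(page, false))
                                continue;
                        __munlock_isolation_failed(page);
                } else {
                        delta_munlocked++;
                }

                /*
                 * We won't be munlocking this page in the next phase, but the
                 * follow_page_mask() pin must still be dropped, and that
                 * cannot be done under the LRU lock.
                 */
                pagevec_add(&pvec_putback, pvec->pages[i]);
                pvec->pages[i] = NULL;
        }
        __mod_zone_page_state(zone, NR_MLOCK, delta_munlocked);
        spin_unlock_irq(&zone->zone_pgdat->lru_lock);

        /* Now release the pins of the pages we are not munlocking */
        pagevec_release(&pvec_putback);

        /* Phase 2: page munlock */
        for (i = 0; i < nr; i++) {
                struct page *page = pvec->pages[i];

                if (page) {
                        lock_page(page);
                        if (!__putback_lru_fast_prepare(page, &pvec_putback,
                                                        &pgrescued)) {
                                /* Slow path: keep a pin across unlock_page() */
                                get_page(page);  /* for putback_lru_page() */
                                __munlock_isolated_page(page);
                                unlock_page(page);
                                put_page(page);  /* from follow_page_mask() */
                        }
                }
        }

        /*
         * Phase 3: page putback for pages that qualified for the fast path.
         * The batched putback also frees them, dropping the remaining pins.
         */
        __putback_lru_fast(&pvec_putback, pgrescued);   /* assumed helper name */
}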
370 * The function expects that the struct page corresponding to @start address is
371 * a non-THP page already pinned and in the @pvec, and that it belongs to @zone. in __munlock_pagevec_fill()
377 * Returns the address of the next page that should be scanned. This equals
378 * @start + PAGE_SIZE when no page could be added by the pte walk.
388 * Initialize pte walk starting at the already pinned page where we in __munlock_pagevec_fill()
393 /* Make sure we do not cross the page table boundary */ in __munlock_pagevec_fill()
399 /* The page next to the pinned page is the first we will try to get */ in __munlock_pagevec_fill()
402 struct page *page = NULL; in __munlock_pagevec_fill() local
405 page = vm_normal_page(vma, start, *pte); in __munlock_pagevec_fill()
407 * Break if page could not be obtained or the page's node+zone does not in __munlock_pagevec_fill()
410 if (!page || page_zone(page) != zone) in __munlock_pagevec_fill()
417 if (PageTransCompound(page)) in __munlock_pagevec_fill()
420 get_page(page); in __munlock_pagevec_fill()
423 * eventual break due to pvec becoming full by adding the page in __munlock_pagevec_fill()
426 if (pagevec_add(pvec, page) == 0) in __munlock_pagevec_fill()
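__munlock_pagevec_fill() (lines 370-426) is the fast pte walk that batches neighbouring small pages into the pagevec. A sketch with the unmatched lines filled in; the *_addr_end() clamping, the pte_present() test, and the locking follow the standard walk pattern and are assumptions where not shown above.

static unsigned long __munlock_pagevec_fill(struct pagevec *pvec,
                struct vm_area_struct *vma, struct zone *zone,
                unsigned long start, unsigned long end)
{
        pte_t *pte;
        spinlock_t *ptl;

        /*
         * Start the pte walk at the already pinned page; we know a pte
         * exists there.
         */
        pte = get_locked_pte(vma->vm_mm, start, &ptl);
        /* Make sure we do not cross the page table boundary */
        end = pgd_addr_end(start, end);
        end = p4d_addr_end(start, end);
        end = pud_addr_end(start, end);
        end = pmd_addr_end(start, end);

        /* The page next to the pinned page is the first we will try to get */
        start += PAGE_SIZE;
        while (start < end) {
                struct page *page = NULL;

                pte++;
                if (pte_present(*pte))
                        page = vm_normal_page(vma, start, *pte);
                /*
                 * Break if page could not be obtained or the page's
                 * node+zone does not match
                 */
                if (!page || page_zone(page) != zone)
                        break;

                /* PTE-mapped THPs are left to munlock_vma_pages_range() */
                if (PageTransCompound(page))
                        break;

                get_page(page);
                /*
                 * Increase the address that will be returned *before* the
                 * eventual break due to pvec becoming full by adding the page
                 */
                start += PAGE_SIZE;
                if (pagevec_add(pvec, page) == 0)
                        break;
        }
        pte_unmap_unlock(pte, ptl);
        return start;
}

Returning an address rather than a count lets the caller resume its main loop exactly at the first address the fill did not consume.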
459 struct page *page; in munlock_vma_pages_range() local
470 * suits munlock very well (and if somehow an abnormal page in munlock_vma_pages_range()
473 page = follow_page(vma, start, FOLL_GET | FOLL_DUMP); in munlock_vma_pages_range()
474 if (page && !IS_ERR(page)) { in munlock_vma_pages_range()
482 reset_page_pinner(page, compound_order(page)); in munlock_vma_pages_range()
483 if (PageTransTail(page)) { in munlock_vma_pages_range()
484 VM_BUG_ON_PAGE(PageMlocked(page), page); in munlock_vma_pages_range()
485 put_page(page); /* follow_page_mask() */ in munlock_vma_pages_range()
486 } else if (PageTransHuge(page)) { in munlock_vma_pages_range()
487 lock_page(page); in munlock_vma_pages_range()
489 * Any THP page found by follow_page_mask() may in munlock_vma_pages_range()
494 page_mask = munlock_vma_page(page); in munlock_vma_pages_range()
495 unlock_page(page); in munlock_vma_pages_range()
496 put_page(page); /* follow_page_mask() */ in munlock_vma_pages_range()
503 pagevec_add(&pvec, page); in munlock_vma_pages_range()
504 zone = page_zone(page); in munlock_vma_pages_range()
509 * the next page to process. Then munlock the in munlock_vma_pages_range()
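The hits from munlock_vma_pages_range() (lines 459-509) show the dispatch between THP tail pages, THP heads, and the batched small-page path. The skeleton below is a simplified outline rather than the exact tree: it omits the Android-specific reset_page_pinner() call seen at line 482, shortens the FOLL_DUMP comment, and reduces the stride arithmetic at the bottom to the common case.

void munlock_vma_pages_range(struct vm_area_struct *vma,
                             unsigned long start, unsigned long end)
{
        vma->vm_flags &= VM_LOCKED_CLEAR_MASK;  /* assumed: clear VM_LOCKED up front */

        while (start < end) {
                struct page *page;
                unsigned int page_mask = 0;
                unsigned long page_increm;
                struct pagevec pvec;
                struct zone *zone;

                pagevec_init(&pvec);
                /*
                 * FOLL_DUMP: error out on holes and zero/special pages rather
                 * than faulting anything in; that suits munlock very well.
                 */
                page = follow_page(vma, start, FOLL_GET | FOLL_DUMP);

                if (page && !IS_ERR(page)) {
                        if (PageTransTail(page)) {
                                /* Tail pages never carry PageMlocked themselves */
                                VM_BUG_ON_PAGE(PageMlocked(page), page);
                                put_page(page); /* follow_page_mask() */
                        } else if (PageTransHuge(page)) {
                                lock_page(page);
                                /*
                                 * A THP found by follow_page_mask() may have
                                 * been split before we got here, so compute
                                 * the page_mask from munlock_vma_page().
                                 */
                                page_mask = munlock_vma_page(page);
                                unlock_page(page);
                                put_page(page); /* follow_page_mask() */
                        } else {
                                /* Small pages: batch them via the pagevec */
                                pagevec_add(&pvec, page);
                                zone = page_zone(page);

                                /*
                                 * Fill the rest of the pagevec with the fast
                                 * pte walk, which also advances start to the
                                 * next page to process, then munlock the
                                 * whole batch.
                                 */
                                start = __munlock_pagevec_fill(&pvec, vma,
                                                               zone, start, end);
                                __munlock_pagevec(&pvec, zone);
                                goto next;
                        }
                }
                /* Simplified stride: one page, or the rest of the THP. */
                page_increm = 1 + page_mask;
                start += page_increm * PAGE_SIZE;
next:
                cond_resched();
        }
}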
584 * It's okay if try_to_unmap_one unmaps a page just after we in mlock_fixup()
653 * Return value: previously mlocked page counts
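Finally, for readers who want to exercise these paths from userspace: the minimal program below is my illustration, not part of mlock.c. It mlocks and munlocks an anonymous mapping and prints VmLck from /proc/self/status, which reflects the per-process locked_vm counter maintained on the mlock_fixup() side; the system-wide NR_MLOCK counter updated in the functions above shows up as "Mlocked:" in /proc/meminfo. munlock() is what ends up in munlock_vma_pages_range().

#include <stdio.h>
#include <string.h>
#include <sys/mman.h>

static void show_vmlck(const char *when)
{
        char line[128];
        FILE *f = fopen("/proc/self/status", "r");

        if (!f)
                return;
        while (fgets(line, sizeof(line), f))
                if (!strncmp(line, "VmLck:", 6))
                        printf("%s: %s", when, line);
        fclose(f);
}

int main(void)
{
        size_t len = 4 * 1024 * 1024;
        void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                         MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

        if (buf == MAP_FAILED) {
                perror("mmap");
                return 1;
        }

        show_vmlck("before mlock");
        if (mlock(buf, len))            /* populates and mlocks the range */
                perror("mlock");        /* may fail if RLIMIT_MEMLOCK is low */
        show_vmlck("after mlock");

        if (munlock(buf, len))          /* drives munlock_vma_pages_range() */
                perror("munlock");
        show_vmlck("after munlock");

        munmap(buf, len);
        return 0;
}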