Lines matching refs: page
126 struct page *page, void *shadow) in page_cache_delete() argument
128 XA_STATE(xas, &mapping->i_pages, page->index); in page_cache_delete()
134 if (!PageHuge(page)) { in page_cache_delete()
135 xas_set_order(&xas, page->index, compound_order(page)); in page_cache_delete()
136 nr = compound_nr(page); in page_cache_delete()
139 VM_BUG_ON_PAGE(!PageLocked(page), page); in page_cache_delete()
140 VM_BUG_ON_PAGE(PageTail(page), page); in page_cache_delete()
141 VM_BUG_ON_PAGE(nr != 1 && shadow, page); in page_cache_delete()
146 page->mapping = NULL; in page_cache_delete()
163 struct page *page) in unaccount_page_cache_page() argument
172 if (PageUptodate(page) && PageMappedToDisk(page)) in unaccount_page_cache_page()
173 cleancache_put_page(page); in unaccount_page_cache_page()
175 cleancache_invalidate_page(mapping, page); in unaccount_page_cache_page()
177 VM_BUG_ON_PAGE(PageTail(page), page); in unaccount_page_cache_page()
178 VM_BUG_ON_PAGE(page_mapped(page), page); in unaccount_page_cache_page()
179 if (!IS_ENABLED(CONFIG_DEBUG_VM) && unlikely(page_mapped(page))) { in unaccount_page_cache_page()
183 current->comm, page_to_pfn(page)); in unaccount_page_cache_page()
184 dump_page(page, "still mapped when deleted"); in unaccount_page_cache_page()
188 mapcount = page_mapcount(page); in unaccount_page_cache_page()
190 page_count(page) >= mapcount + 2) { in unaccount_page_cache_page()
197 page_mapcount_reset(page); in unaccount_page_cache_page()
198 page_ref_sub(page, mapcount); in unaccount_page_cache_page()
203 if (PageHuge(page)) in unaccount_page_cache_page()
206 nr = thp_nr_pages(page); in unaccount_page_cache_page()
208 __mod_lruvec_page_state(page, NR_FILE_PAGES, -nr); in unaccount_page_cache_page()
209 if (PageSwapBacked(page)) { in unaccount_page_cache_page()
210 __mod_lruvec_page_state(page, NR_SHMEM, -nr); in unaccount_page_cache_page()
211 if (PageTransHuge(page)) in unaccount_page_cache_page()
212 __dec_node_page_state(page, NR_SHMEM_THPS); in unaccount_page_cache_page()
213 } else if (PageTransHuge(page)) { in unaccount_page_cache_page()
214 __dec_node_page_state(page, NR_FILE_THPS); in unaccount_page_cache_page()
228 if (WARN_ON_ONCE(PageDirty(page))) in unaccount_page_cache_page()
229 account_page_cleaned(page, mapping, inode_to_wb(mapping->host)); in unaccount_page_cache_page()
237 void __delete_from_page_cache(struct page *page, void *shadow) in __delete_from_page_cache() argument
239 struct address_space *mapping = page->mapping; in __delete_from_page_cache()
241 trace_mm_filemap_delete_from_page_cache(page); in __delete_from_page_cache()
243 unaccount_page_cache_page(mapping, page); in __delete_from_page_cache()
244 page_cache_delete(mapping, page, shadow); in __delete_from_page_cache()
248 struct page *page) in page_cache_free_page() argument
250 void (*freepage)(struct page *); in page_cache_free_page()
254 freepage(page); in page_cache_free_page()
256 if (PageTransHuge(page) && !PageHuge(page)) { in page_cache_free_page()
257 page_ref_sub(page, thp_nr_pages(page)); in page_cache_free_page()
258 VM_BUG_ON_PAGE(page_count(page) <= 0, page); in page_cache_free_page()
260 put_page(page); in page_cache_free_page()
272 void delete_from_page_cache(struct page *page) in delete_from_page_cache() argument
274 struct address_space *mapping = page_mapping(page); in delete_from_page_cache()
277 BUG_ON(!PageLocked(page)); in delete_from_page_cache()
279 __delete_from_page_cache(page, NULL); in delete_from_page_cache()
282 page_cache_free_page(mapping, page); in delete_from_page_cache()
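The helpers above fit together as one deletion path: delete_from_page_cache() takes a locked page, calls __delete_from_page_cache() (which unaccounts the page and removes it from the XArray via page_cache_delete()), and finally drops the page cache's reference in page_cache_free_page(). A minimal caller-side sketch, assuming the caller already holds its own reference on a locked page-cache page; the wrapper name drop_cached_page() is illustrative, not part of the listing:

    #include <linux/mm.h>
    #include <linux/pagemap.h>

    /*
     * Sketch: remove one page from the page cache.  delete_from_page_cache()
     * requires the page to be locked and drops the page cache's reference
     * itself (via page_cache_free_page()); the caller still owns its own ref.
     */
    static void drop_cached_page(struct page *page)
    {
            lock_page(page);                /* delete_from_page_cache() checks PageLocked */
            if (page->mapping)              /* may already have been truncated */
                    delete_from_page_cache(page);
            unlock_page(page);
            put_page(page);                 /* drop the caller's reference */
    }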
306 struct page *page; in page_cache_delete_batch() local
309 xas_for_each(&xas, page, ULONG_MAX) { in page_cache_delete_batch()
314 if (xa_is_value(page)) in page_cache_delete_batch()
323 if (page != pvec->pages[i]) { in page_cache_delete_batch()
324 VM_BUG_ON_PAGE(page->index > pvec->pages[i]->index, in page_cache_delete_batch()
325 page); in page_cache_delete_batch()
329 WARN_ON_ONCE(!PageLocked(page)); in page_cache_delete_batch()
331 if (page->index == xas.xa_index) in page_cache_delete_batch()
332 page->mapping = NULL; in page_cache_delete_batch()
340 if (page->index + compound_nr(page) - 1 == xas.xa_index) in page_cache_delete_batch()
481 struct page *page; in filemap_range_has_page() local
490 page = xas_find(&xas, max); in filemap_range_has_page()
491 if (xas_retry(&xas, page)) in filemap_range_has_page()
494 if (xa_is_value(page)) in filemap_range_has_page()
505 return page != NULL; in filemap_range_has_page()
530 struct page *page = pvec.pages[i]; in __filemap_fdatawait_range() local
532 wait_on_page_writeback(page); in __filemap_fdatawait_range()
533 ClearPageError(page); in __filemap_fdatawait_range()
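The two __filemap_fdatawait_range() lines show the tail of the usual writeback-wait loop: pages are found by the writeback tag, waited on, and their error bit is cleared. A condensed sketch of that pattern, assuming the pagevec wrappers from <linux/pagevec.h>; the function name and bounds handling are illustrative:

    #include <linux/pagevec.h>
    #include <linux/pagemap.h>
    #include <linux/sched.h>

    /* Sketch: wait for writeback to finish on every page in [index, end]. */
    static void wait_writeback_range(struct address_space *mapping,
                                     pgoff_t index, pgoff_t end)
    {
            struct pagevec pvec;
            unsigned i;

            pagevec_init(&pvec);
            while (index <= end &&
                   pagevec_lookup_range_tag(&pvec, mapping, &index, end,
                                            PAGECACHE_TAG_WRITEBACK)) {
                    for (i = 0; i < pagevec_count(&pvec); i++) {
                            struct page *page = pvec.pages[i];

                            wait_on_page_writeback(page);
                            ClearPageError(page);
                    }
                    pagevec_release(&pvec);
                    cond_resched();
            }
    }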
795 int replace_page_cache_page(struct page *old, struct page *new, gfp_t gfp_mask) in replace_page_cache_page()
798 void (*freepage)(struct page *) = mapping->a_ops->freepage; in replace_page_cache_page()
835 noinline int __add_to_page_cache_locked(struct page *page, in __add_to_page_cache_locked() argument
841 int huge = PageHuge(page); in __add_to_page_cache_locked()
845 VM_BUG_ON_PAGE(!PageLocked(page), page); in __add_to_page_cache_locked()
846 VM_BUG_ON_PAGE(PageSwapBacked(page), page); in __add_to_page_cache_locked()
849 get_page(page); in __add_to_page_cache_locked()
850 page->mapping = mapping; in __add_to_page_cache_locked()
851 page->index = offset; in __add_to_page_cache_locked()
854 error = mem_cgroup_charge(page, current->mm, gfp); in __add_to_page_cache_locked()
866 if (order > thp_order(page)) in __add_to_page_cache_locked()
883 if (order > thp_order(page)) { in __add_to_page_cache_locked()
889 xas_store(&xas, page); in __add_to_page_cache_locked()
899 __inc_lruvec_page_state(page, NR_FILE_PAGES); in __add_to_page_cache_locked()
907 mem_cgroup_uncharge(page); in __add_to_page_cache_locked()
911 trace_mm_filemap_add_to_page_cache(page); in __add_to_page_cache_locked()
914 page->mapping = NULL; in __add_to_page_cache_locked()
916 put_page(page); in __add_to_page_cache_locked()
933 int add_to_page_cache_locked(struct page *page, struct address_space *mapping, in add_to_page_cache_locked() argument
936 return __add_to_page_cache_locked(page, mapping, offset, in add_to_page_cache_locked()
941 int add_to_page_cache_lru(struct page *page, struct address_space *mapping, in add_to_page_cache_lru() argument
947 __SetPageLocked(page); in add_to_page_cache_lru()
948 ret = __add_to_page_cache_locked(page, mapping, offset, in add_to_page_cache_lru()
951 __ClearPageLocked(page); in add_to_page_cache_lru()
961 WARN_ON_ONCE(PageActive(page)); in add_to_page_cache_lru()
963 workingset_refault(page, shadow); in add_to_page_cache_lru()
964 lru_cache_add(page); in add_to_page_cache_lru()
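add_to_page_cache_lru() is the normal way new pages enter both the page cache and the LRU: the lines above show it locking the page, inserting it, replaying any workingset shadow entry on success, and adding it to the LRU. A sketch of the common allocate-insert-read sequence, assuming a context where mapping->a_ops->readpage() is usable; the wrapper name read_one_page() and the file argument are illustrative:

    #include <linux/fs.h>
    #include <linux/mm.h>
    #include <linux/pagemap.h>
    #include <linux/err.h>

    /* Sketch: materialise one page of a file in the page cache and start a read. */
    static struct page *read_one_page(struct file *file,
                                      struct address_space *mapping, pgoff_t index)
    {
            struct page *page;
            int err;

            page = page_cache_alloc(mapping);       /* honours mapping_gfp_mask() */
            if (!page)
                    return ERR_PTR(-ENOMEM);

            err = add_to_page_cache_lru(page, mapping, index,
                                        mapping_gfp_mask(mapping));
            if (err) {
                    put_page(page);                 /* -EEXIST: someone else won the race */
                    return ERR_PTR(err);
            }

            /* The page is still locked here; ->readpage() unlocks it on completion. */
            err = mapping->a_ops->readpage(file, page);
            if (err) {
                    put_page(page);
                    return ERR_PTR(err);
            }
            return page;                            /* caller still holds a reference */
    }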
971 struct page *__page_cache_alloc(gfp_t gfp) in __page_cache_alloc()
974 struct page *page; in __page_cache_alloc() local
981 page = __alloc_pages_node(n, gfp, 0); in __page_cache_alloc()
982 } while (!page && read_mems_allowed_retry(cpuset_mems_cookie)); in __page_cache_alloc()
984 return page; in __page_cache_alloc()
1005 static wait_queue_head_t *page_waitqueue(struct page *page) in page_waitqueue() argument
1007 return &page_wait_table[hash_ptr(page, PAGE_WAIT_TABLE_BITS)]; in page_waitqueue()
1070 if (test_bit(key->bit_nr, &key->page->flags)) in wake_page_function()
1073 if (test_and_set_bit(key->bit_nr, &key->page->flags)) in wake_page_function()
1105 static void wake_up_page_bit(struct page *page, int bit_nr) in wake_up_page_bit() argument
1107 wait_queue_head_t *q = page_waitqueue(page); in wake_up_page_bit()
1112 key.page = page; in wake_up_page_bit()
1147 ClearPageWaiters(page); in wake_up_page_bit()
1159 static void wake_up_page(struct page *page, int bit) in wake_up_page() argument
1161 if (!PageWaiters(page)) in wake_up_page()
1163 wake_up_page_bit(page, bit); in wake_up_page()
1185 static inline bool trylock_page_bit_common(struct page *page, int bit_nr, in trylock_page_bit_common() argument
1189 if (test_and_set_bit(bit_nr, &page->flags)) in trylock_page_bit_common()
1191 } else if (test_bit(bit_nr, &page->flags)) in trylock_page_bit_common()
1202 struct page *page, int bit_nr, int state, enum behavior behavior) in wait_on_page_bit_common() argument
1212 !PageUptodate(page) && PageWorkingset(page)) { in wait_on_page_bit_common()
1213 if (!PageSwapBacked(page)) { in wait_on_page_bit_common()
1223 wait_page.page = page; in wait_on_page_bit_common()
1249 SetPageWaiters(page); in wait_on_page_bit_common()
1250 if (!trylock_page_bit_common(page, bit_nr, wait)) in wait_on_page_bit_common()
1263 put_page(page); in wait_on_page_bit_common()
1300 if (unlikely(test_and_set_bit(bit_nr, &page->flags))) in wait_on_page_bit_common()
1340 __sched void wait_on_page_bit(struct page *page, int bit_nr) in wait_on_page_bit() argument
1342 wait_queue_head_t *q = page_waitqueue(page); in wait_on_page_bit()
1343 wait_on_page_bit_common(q, page, bit_nr, TASK_UNINTERRUPTIBLE, SHARED); in wait_on_page_bit()
1347 __sched int wait_on_page_bit_killable(struct page *page, int bit_nr) in wait_on_page_bit_killable() argument
1349 wait_queue_head_t *q = page_waitqueue(page); in wait_on_page_bit_killable()
1350 return wait_on_page_bit_common(q, page, bit_nr, TASK_KILLABLE, SHARED); in wait_on_page_bit_killable()
1354 static int __wait_on_page_locked_async(struct page *page, in __wait_on_page_locked_async() argument
1357 struct wait_queue_head *q = page_waitqueue(page); in __wait_on_page_locked_async()
1360 wait->page = page; in __wait_on_page_locked_async()
1365 SetPageWaiters(page); in __wait_on_page_locked_async()
1367 ret = !trylock_page(page); in __wait_on_page_locked_async()
1369 ret = PageLocked(page); in __wait_on_page_locked_async()
1384 static int wait_on_page_locked_async(struct page *page, in wait_on_page_locked_async() argument
1387 if (!PageLocked(page)) in wait_on_page_locked_async()
1389 return __wait_on_page_locked_async(compound_head(page), wait, false); in wait_on_page_locked_async()
1402 void put_and_wait_on_page_locked(struct page *page) in put_and_wait_on_page_locked() argument
1406 page = compound_head(page); in put_and_wait_on_page_locked()
1407 q = page_waitqueue(page); in put_and_wait_on_page_locked()
1408 wait_on_page_bit_common(q, page, PG_locked, TASK_UNINTERRUPTIBLE, DROP); in put_and_wait_on_page_locked()
1418 void add_page_wait_queue(struct page *page, wait_queue_entry_t *waiter) in add_page_wait_queue() argument
1420 wait_queue_head_t *q = page_waitqueue(page); in add_page_wait_queue()
1425 SetPageWaiters(page); in add_page_wait_queue()
1468 void unlock_page(struct page *page) in unlock_page() argument
1471 page = compound_head(page); in unlock_page()
1472 VM_BUG_ON_PAGE(!PageLocked(page), page); in unlock_page()
1473 if (clear_bit_unlock_is_negative_byte(PG_locked, &page->flags)) in unlock_page()
1474 wake_up_page_bit(page, PG_locked); in unlock_page()
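unlock_page() is the wake-up side of the PG_locked machinery above: clear_bit_unlock_is_negative_byte() clears the lock bit and reports whether PG_waiters was set, and only then is the hashed wait queue walked. From a caller's point of view the contract is plain lock/unlock pairing, with wait_on_page_locked() available for callers that only need to sleep until the bit clears. A minimal sketch; the helper names are illustrative:

    #include <linux/pagemap.h>

    /* Sketch: the caller-visible contract of the PG_locked machinery. */
    static bool page_seems_uptodate(struct page *page)
    {
            bool uptodate;

            lock_page(page);                /* sleeps in __lock_page() if contended */
            uptodate = PageUptodate(page);
            unlock_page(page);              /* wakes any PG_locked waiters */
            return uptodate;
    }

    /* Waiting without excluding other lockers is also possible: */
    static void just_wait(struct page *page)
    {
            wait_on_page_locked(page);      /* wait_on_page_bit(PG_locked) under the hood */
    }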
1482 void end_page_writeback(struct page *page) in end_page_writeback() argument
1491 if (PageReclaim(page)) { in end_page_writeback()
1492 ClearPageReclaim(page); in end_page_writeback()
1493 rotate_reclaimable_page(page); in end_page_writeback()
1502 get_page(page); in end_page_writeback()
1503 if (!test_clear_page_writeback(page)) in end_page_writeback()
1507 wake_up_page(page, PG_writeback); in end_page_writeback()
1508 put_page(page); in end_page_writeback()
1516 void page_endio(struct page *page, bool is_write, int err) in page_endio() argument
1520 SetPageUptodate(page); in page_endio()
1522 ClearPageUptodate(page); in page_endio()
1523 SetPageError(page); in page_endio()
1525 unlock_page(page); in page_endio()
1530 SetPageError(page); in page_endio()
1531 mapping = page_mapping(page); in page_endio()
1535 end_page_writeback(page); in page_endio()
1544 __sched void __lock_page(struct page *__page) in __lock_page()
1546 struct page *page = compound_head(__page); in __lock_page() local
1547 wait_queue_head_t *q = page_waitqueue(page); in __lock_page()
1548 wait_on_page_bit_common(q, page, PG_locked, TASK_UNINTERRUPTIBLE, in __lock_page()
1553 __sched int __lock_page_killable(struct page *__page) in __lock_page_killable()
1555 struct page *page = compound_head(__page); in __lock_page_killable() local
1556 wait_queue_head_t *q = page_waitqueue(page); in __lock_page_killable()
1557 return wait_on_page_bit_common(q, page, PG_locked, TASK_KILLABLE, in __lock_page_killable()
1562 __sched int __lock_page_async(struct page *page, struct wait_page_queue *wait) in __lock_page_async() argument
1564 return __wait_on_page_locked_async(page, wait, true); in __lock_page_async()
1578 __sched int __lock_page_or_retry(struct page *page, struct mm_struct *mm, in __lock_page_or_retry() argument
1591 wait_on_page_locked_killable(page); in __lock_page_or_retry()
1593 wait_on_page_locked(page); in __lock_page_or_retry()
1599 ret = __lock_page_killable(page); in __lock_page_or_retry()
1605 __lock_page(page); in __lock_page_or_retry()
1695 struct page *find_get_entry(struct address_space *mapping, pgoff_t index) in find_get_entry()
1698 struct page *page; in find_get_entry() local
1703 page = xas_load(&xas); in find_get_entry()
1704 if (xas_retry(&xas, page)) in find_get_entry()
1710 if (!page || xa_is_value(page)) in find_get_entry()
1713 if (!page_cache_get_speculative(page)) in find_get_entry()
1721 if (unlikely(page != xas_reload(&xas))) { in find_get_entry()
1722 put_page(page); in find_get_entry()
1728 return page; in find_get_entry()
1745 struct page *find_lock_entry(struct address_space *mapping, pgoff_t index) in find_lock_entry()
1747 struct page *page; in find_lock_entry() local
1750 page = find_get_entry(mapping, index); in find_lock_entry()
1751 if (page && !xa_is_value(page)) { in find_lock_entry()
1752 lock_page(page); in find_lock_entry()
1754 if (unlikely(page->mapping != mapping)) { in find_lock_entry()
1755 unlock_page(page); in find_lock_entry()
1756 put_page(page); in find_lock_entry()
1759 VM_BUG_ON_PAGE(!thp_contains(page, index), page); in find_lock_entry()
1761 return page; in find_lock_entry()
1796 struct page *pagecache_get_page(struct address_space *mapping, pgoff_t index, in pagecache_get_page()
1799 struct page *page; in pagecache_get_page() local
1802 page = find_get_entry(mapping, index); in pagecache_get_page()
1803 if (xa_is_value(page)) in pagecache_get_page()
1804 page = NULL; in pagecache_get_page()
1807 gfp_mask, page); in pagecache_get_page()
1808 if (!page) in pagecache_get_page()
1813 if (!trylock_page(page)) { in pagecache_get_page()
1814 put_page(page); in pagecache_get_page()
1818 lock_page(page); in pagecache_get_page()
1822 if (unlikely(page->mapping != mapping)) { in pagecache_get_page()
1823 unlock_page(page); in pagecache_get_page()
1824 put_page(page); in pagecache_get_page()
1827 VM_BUG_ON_PAGE(!thp_contains(page, index), page); in pagecache_get_page()
1831 mark_page_accessed(page); in pagecache_get_page()
1834 if (page_is_idle(page)) in pagecache_get_page()
1835 clear_page_idle(page); in pagecache_get_page()
1838 page = find_subpage(page, index); in pagecache_get_page()
1841 if (!page && (fgp_flags & FGP_CREAT)) { in pagecache_get_page()
1848 page = __page_cache_alloc(gfp_mask); in pagecache_get_page()
1849 if (!page) in pagecache_get_page()
1857 __SetPageReferenced(page); in pagecache_get_page()
1859 err = add_to_page_cache_lru(page, mapping, index, gfp_mask); in pagecache_get_page()
1861 put_page(page); in pagecache_get_page()
1862 page = NULL; in pagecache_get_page()
1871 if (page && (fgp_flags & FGP_FOR_MMAP)) in pagecache_get_page()
1872 unlock_page(page); in pagecache_get_page()
1875 return page; in pagecache_get_page()
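pagecache_get_page() is the general lookup that helpers such as find_get_page(), find_lock_page(), find_or_create_page() and grab_cache_page_write_begin() are built on; its behaviour is selected by FGP_* flags. A sketch of a find-or-create lookup, mirroring the flag combination the kernel's own find_or_create_page() wrapper uses; the function name is illustrative:

    #include <linux/pagemap.h>

    /* Sketch: look up index in the page cache, creating a locked page if absent. */
    static struct page *get_or_create_locked(struct address_space *mapping,
                                             pgoff_t index)
    {
            /*
             * FGP_LOCK: return the page locked; FGP_CREAT: allocate on a miss;
             * FGP_ACCESSED: mark it accessed for LRU aging.
             */
            return pagecache_get_page(mapping, index,
                                      FGP_LOCK | FGP_ACCESSED | FGP_CREAT,
                                      mapping_gfp_mask(mapping));
    }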
1908 struct page **entries, pgoff_t *indices) in find_get_entries()
1911 struct page *page; in find_get_entries() local
1918 xas_for_each(&xas, page, ULONG_MAX) { in find_get_entries()
1919 if (xas_retry(&xas, page)) in find_get_entries()
1926 if (xa_is_value(page)) in find_get_entries()
1929 if (!page_cache_get_speculative(page)) in find_get_entries()
1933 if (unlikely(page != xas_reload(&xas))) in find_get_entries()
1940 if (PageTransHuge(page) && !PageHuge(page)) { in find_get_entries()
1941 page = find_subpage(page, xas.xa_index); in find_get_entries()
1946 entries[ret] = page; in find_get_entries()
1951 put_page(page); in find_get_entries()
1982 struct page **pages) in find_get_pages_range()
1985 struct page *page; in find_get_pages_range() local
1992 xas_for_each(&xas, page, end) { in find_get_pages_range()
1993 if (xas_retry(&xas, page)) in find_get_pages_range()
1996 if (xa_is_value(page)) in find_get_pages_range()
1999 if (!page_cache_get_speculative(page)) in find_get_pages_range()
2003 if (unlikely(page != xas_reload(&xas))) in find_get_pages_range()
2006 pages[ret] = find_subpage(page, xas.xa_index); in find_get_pages_range()
2013 put_page(page); in find_get_pages_range()
2047 unsigned int nr_pages, struct page **pages) in find_get_pages_contig()
2050 struct page *page; in find_get_pages_contig() local
2057 for (page = xas_load(&xas); page; page = xas_next(&xas)) { in find_get_pages_contig()
2058 if (xas_retry(&xas, page)) in find_get_pages_contig()
2064 if (xa_is_value(page)) in find_get_pages_contig()
2067 if (!page_cache_get_speculative(page)) in find_get_pages_contig()
2071 if (unlikely(page != xas_reload(&xas))) in find_get_pages_contig()
2074 pages[ret] = find_subpage(page, xas.xa_index); in find_get_pages_contig()
2079 put_page(page); in find_get_pages_contig()
2104 struct page **pages) in find_get_pages_range_tag()
2107 struct page *page; in find_get_pages_range_tag() local
2114 xas_for_each_marked(&xas, page, end, tag) { in find_get_pages_range_tag()
2115 if (xas_retry(&xas, page)) in find_get_pages_range_tag()
2122 if (xa_is_value(page)) in find_get_pages_range_tag()
2125 if (!page_cache_get_speculative(page)) in find_get_pages_range_tag()
2129 if (unlikely(page != xas_reload(&xas))) in find_get_pages_range_tag()
2132 pages[ret] = find_subpage(page, xas.xa_index); in find_get_pages_range_tag()
2139 put_page(page); in find_get_pages_range_tag()
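The find_get_pages_range{,_contig,_tag}() family above all follow the same shape: an XArray walk under RCU, a speculative reference, a reload check, and a filled pages[] array, where every returned page carries a reference the caller must drop. A caller-side sketch using plain find_get_pages_range(); the batch size and function name are illustrative:

    #include <linux/kernel.h>
    #include <linux/mm.h>
    #include <linux/pagemap.h>

    /* Sketch: gang-lookup cached pages in [start, end] a batch at a time. */
    static unsigned long count_cached_pages(struct address_space *mapping,
                                            pgoff_t start, pgoff_t end)
    {
            struct page *pages[16];
            unsigned long total = 0;
            unsigned nr, i;

            do {
                    nr = find_get_pages_range(mapping, &start, end,
                                              ARRAY_SIZE(pages), pages);
                    for (i = 0; i < nr; i++) {
                            total++;
                            put_page(pages[i]);     /* each returned page is referenced */
                    }
            } while (nr == ARRAY_SIZE(pages));      /* a short batch means the range is done */
            return total;
    }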
2231 struct page *page; in generic_file_buffered_read() local
2243 page = find_get_page(mapping, index); in generic_file_buffered_read()
2244 if (!page) { in generic_file_buffered_read()
2250 page = find_get_page(mapping, index); in generic_file_buffered_read()
2251 if (unlikely(page == NULL)) in generic_file_buffered_read()
2254 if (PageReadahead(page)) { in generic_file_buffered_read()
2256 put_page(page); in generic_file_buffered_read()
2260 ra, filp, page, in generic_file_buffered_read()
2263 if (!PageUptodate(page)) { in generic_file_buffered_read()
2271 put_page(page); in generic_file_buffered_read()
2274 error = wait_on_page_locked_async(page, in generic_file_buffered_read()
2278 put_page(page); in generic_file_buffered_read()
2281 error = wait_on_page_locked_killable(page); in generic_file_buffered_read()
2285 if (PageUptodate(page)) in generic_file_buffered_read()
2294 if (!trylock_page(page)) in generic_file_buffered_read()
2297 if (!page->mapping) in generic_file_buffered_read()
2299 if (!mapping->a_ops->is_partially_uptodate(page, in generic_file_buffered_read()
2302 unlock_page(page); in generic_file_buffered_read()
2317 put_page(page); in generic_file_buffered_read()
2326 put_page(page); in generic_file_buffered_read()
2337 flush_dcache_page(page); in generic_file_buffered_read()
2344 mark_page_accessed(page); in generic_file_buffered_read()
2352 ret = copy_page_to_iter(page, offset, nr, iter); in generic_file_buffered_read()
2358 put_page(page); in generic_file_buffered_read()
2372 put_page(page); in generic_file_buffered_read()
2375 error = lock_page_async(page, iocb->ki_waitq); in generic_file_buffered_read()
2377 error = lock_page_killable(page); in generic_file_buffered_read()
2384 if (!page->mapping) { in generic_file_buffered_read()
2385 unlock_page(page); in generic_file_buffered_read()
2386 put_page(page); in generic_file_buffered_read()
2391 if (PageUptodate(page)) { in generic_file_buffered_read()
2392 unlock_page(page); in generic_file_buffered_read()
2398 unlock_page(page); in generic_file_buffered_read()
2399 put_page(page); in generic_file_buffered_read()
2407 ClearPageError(page); in generic_file_buffered_read()
2409 error = mapping->a_ops->readpage(filp, page); in generic_file_buffered_read()
2413 put_page(page); in generic_file_buffered_read()
2420 if (!PageUptodate(page)) { in generic_file_buffered_read()
2423 put_page(page); in generic_file_buffered_read()
2426 error = lock_page_async(page, iocb->ki_waitq); in generic_file_buffered_read()
2428 error = lock_page_killable(page); in generic_file_buffered_read()
2433 if (!PageUptodate(page)) { in generic_file_buffered_read()
2434 if (page->mapping == NULL) { in generic_file_buffered_read()
2438 unlock_page(page); in generic_file_buffered_read()
2439 put_page(page); in generic_file_buffered_read()
2442 unlock_page(page); in generic_file_buffered_read()
2447 unlock_page(page); in generic_file_buffered_read()
2454 put_page(page); in generic_file_buffered_read()
2462 page = page_cache_alloc(mapping); in generic_file_buffered_read()
2463 if (!page) { in generic_file_buffered_read()
2467 error = add_to_page_cache_lru(page, mapping, index, in generic_file_buffered_read()
2470 put_page(page); in generic_file_buffered_read()
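The long run of generic_file_buffered_read() lines is the buffered read loop: find the page, kick readahead, lock and ->readpage() it if it is not uptodate, then copy_page_to_iter() into the user buffer. Filesystems normally reach it indirectly through generic_file_read_iter(). A sketch of that wiring, with "myfs" as a placeholder name:

    #include <linux/fs.h>

    /*
     * Sketch: a filesystem that relies on the generic buffered read path.
     * generic_file_read_iter() handles O_DIRECT itself and otherwise falls
     * through to the buffered read loop shown above.
     */
    static const struct file_operations myfs_file_operations = {
            .llseek         = generic_file_llseek,
            .read_iter      = generic_file_read_iter,
            .mmap           = generic_file_mmap,
    };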
2584 static int lock_page_maybe_drop_mmap(struct vm_fault *vmf, struct page *page, in lock_page_maybe_drop_mmap() argument
2587 if (trylock_page(page)) in lock_page_maybe_drop_mmap()
2600 if (__lock_page_killable(page)) { in lock_page_maybe_drop_mmap()
2612 __lock_page(page); in lock_page_maybe_drop_mmap()
2675 struct page *page) in do_async_mmap_readahead() argument
2690 if (PageReadahead(page)) { in do_async_mmap_readahead()
2693 page, offset, ra->ra_pages); in do_async_mmap_readahead()
2733 struct page *page = NULL; in filemap_fault() local
2738 page = find_get_page(mapping, offset); in filemap_fault()
2739 if (unlikely(!page) || unlikely(PageReadahead(page))) in filemap_fault()
2742 if (!trylock_page(page)) in filemap_fault()
2745 if (unlikely(compound_head(page)->mapping != mapping)) in filemap_fault()
2747 VM_BUG_ON_PAGE(page_to_pgoff(page) != offset, page); in filemap_fault()
2748 if (unlikely(!PageUptodate(page))) in filemap_fault()
2771 vmf->page = page; in filemap_fault()
2774 unlock_page(page); in filemap_fault()
2782 trace_android_vh_filemap_fault_get_page(vmf, &page, &retry); in filemap_fault()
2785 if (unlikely(page)) in filemap_fault()
2791 page = find_get_page(mapping, offset); in filemap_fault()
2792 if (likely(page) && !(vmf->flags & FAULT_FLAG_TRIED)) { in filemap_fault()
2797 fpin = do_async_mmap_readahead(vmf, page); in filemap_fault()
2798 } else if (!page) { in filemap_fault()
2805 page = pagecache_get_page(mapping, offset, in filemap_fault()
2808 if (!page) { in filemap_fault()
2815 if (!lock_page_maybe_drop_mmap(vmf, page, &fpin)) in filemap_fault()
2819 if (unlikely(compound_head(page)->mapping != mapping)) { in filemap_fault()
2820 unlock_page(page); in filemap_fault()
2821 put_page(page); in filemap_fault()
2824 VM_BUG_ON_PAGE(page_to_pgoff(page) != offset, page); in filemap_fault()
2830 if (unlikely(!PageUptodate(page))) in filemap_fault()
2839 unlock_page(page); in filemap_fault()
2850 unlock_page(page); in filemap_fault()
2851 put_page(page); in filemap_fault()
2855 vmf->page = page; in filemap_fault()
2865 ClearPageError(page); in filemap_fault()
2867 error = mapping->a_ops->readpage(file, page); in filemap_fault()
2869 wait_on_page_locked(page); in filemap_fault()
2870 if (!PageUptodate(page)) in filemap_fault()
2875 put_page(page); in filemap_fault()
2889 if (page) { in filemap_fault()
2890 trace_android_vh_filemap_fault_cache_page(vmf, page); in filemap_fault()
2891 put_page(page); in filemap_fault()
2899 static bool filemap_map_pmd(struct vm_fault *vmf, struct page *page) in filemap_map_pmd() argument
2905 unlock_page(page); in filemap_map_pmd()
2906 put_page(page); in filemap_map_pmd()
2910 if (pmd_none(*vmf->pmd) && PageTransHuge(page)) { in filemap_map_pmd()
2911 vm_fault_t ret = do_set_pmd(vmf, page); in filemap_map_pmd()
2914 unlock_page(page); in filemap_map_pmd()
2931 unlock_page(page); in filemap_map_pmd()
2932 put_page(page); in filemap_map_pmd()
2939 static struct page *next_uptodate_page(struct page *page, in next_uptodate_page() argument
2946 if (!page) in next_uptodate_page()
2948 if (xas_retry(xas, page)) in next_uptodate_page()
2950 if (xa_is_value(page)) in next_uptodate_page()
2952 if (PageLocked(page)) in next_uptodate_page()
2954 if (!page_cache_get_speculative(page)) in next_uptodate_page()
2957 if (unlikely(page != xas_reload(xas))) in next_uptodate_page()
2959 if (!PageUptodate(page) || PageReadahead(page)) in next_uptodate_page()
2961 if (PageHWPoison(page)) in next_uptodate_page()
2963 if (!trylock_page(page)) in next_uptodate_page()
2965 if (page->mapping != mapping) in next_uptodate_page()
2967 if (!PageUptodate(page)) in next_uptodate_page()
2972 return page; in next_uptodate_page()
2974 unlock_page(page); in next_uptodate_page()
2976 put_page(page); in next_uptodate_page()
2977 } while ((page = xas_next_entry(xas, end_pgoff)) != NULL); in next_uptodate_page()
2982 static inline struct page *first_map_page(struct address_space *mapping, in first_map_page()
2990 static inline struct page *next_map_page(struct address_space *mapping, in next_map_page()
3015 struct page *head, *page; in filemap_map_pages() local
3039 page = find_subpage(head, xas.xa_index); in filemap_map_pages()
3040 if (PageHWPoison(page)) in filemap_map_pages()
3057 do_set_pte(vmf, page, addr); in filemap_map_pages()
3076 struct page *page = vmf->page; in filemap_page_mkwrite() local
3082 lock_page(page); in filemap_page_mkwrite()
3083 if (page->mapping != inode->i_mapping) { in filemap_page_mkwrite()
3084 unlock_page(page); in filemap_page_mkwrite()
3093 set_page_dirty(page); in filemap_page_mkwrite()
3094 wait_for_stable_page(page); in filemap_page_mkwrite()
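filemap_fault(), filemap_map_pages() and filemap_page_mkwrite() above form the standard page-cache mmap path; a filesystem that mmaps through the page cache typically exposes them via its vm_operations_struct, which is exactly what the kernel's generic_file_vm_ops does. A sketch, with "myfs" as a placeholder:

    #include <linux/fs.h>
    #include <linux/mm.h>

    /* Sketch: the usual page-cache backed mmap operations. */
    static const struct vm_operations_struct myfs_file_vm_ops = {
            .fault          = filemap_fault,
            .map_pages      = filemap_map_pages,
            .page_mkwrite   = filemap_page_mkwrite,
    };

    static int myfs_file_mmap(struct file *file, struct vm_area_struct *vma)
    {
            file_accessed(file);
            vma->vm_ops = &myfs_file_vm_ops;
            return 0;
    }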
3150 static struct page *wait_on_page_read(struct page *page) in wait_on_page_read() argument
3152 if (!IS_ERR(page)) { in wait_on_page_read()
3153 wait_on_page_locked(page); in wait_on_page_read()
3154 if (!PageUptodate(page)) { in wait_on_page_read()
3155 put_page(page); in wait_on_page_read()
3156 page = ERR_PTR(-EIO); in wait_on_page_read()
3159 return page; in wait_on_page_read()
3162 static struct page *do_read_cache_page(struct address_space *mapping, in do_read_cache_page()
3164 int (*filler)(void *, struct page *), in do_read_cache_page() argument
3168 struct page *page; in do_read_cache_page() local
3171 page = find_get_page(mapping, index); in do_read_cache_page()
3172 if (!page) { in do_read_cache_page()
3173 page = __page_cache_alloc(gfp); in do_read_cache_page()
3174 if (!page) in do_read_cache_page()
3176 err = add_to_page_cache_lru(page, mapping, index, gfp); in do_read_cache_page()
3178 put_page(page); in do_read_cache_page()
3187 err = filler(data, page); in do_read_cache_page()
3189 err = mapping->a_ops->readpage(data, page); in do_read_cache_page()
3192 put_page(page); in do_read_cache_page()
3196 page = wait_on_page_read(page); in do_read_cache_page()
3197 if (IS_ERR(page)) in do_read_cache_page()
3198 return page; in do_read_cache_page()
3201 if (PageUptodate(page)) in do_read_cache_page()
3235 wait_on_page_locked(page); in do_read_cache_page()
3236 if (PageUptodate(page)) in do_read_cache_page()
3240 lock_page(page); in do_read_cache_page()
3243 if (!page->mapping) { in do_read_cache_page()
3244 unlock_page(page); in do_read_cache_page()
3245 put_page(page); in do_read_cache_page()
3250 if (PageUptodate(page)) { in do_read_cache_page()
3251 unlock_page(page); in do_read_cache_page()
3261 ClearPageError(page); in do_read_cache_page()
3265 mark_page_accessed(page); in do_read_cache_page()
3266 return page; in do_read_cache_page()
3283 struct page *read_cache_page(struct address_space *mapping, in read_cache_page()
3285 int (*filler)(void *, struct page *), in read_cache_page() argument
3306 struct page *read_cache_page_gfp(struct address_space *mapping, in read_cache_page_gfp()
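do_read_cache_page() and its read_cache_page()/read_cache_page_gfp() wrappers return a referenced, uptodate page or an ERR_PTR; the page is not returned locked. A sketch of typical caller-side handling, assuming the read_mapping_page() convenience wrapper from <linux/pagemap.h>; the function name and kmap usage are illustrative:

    #include <linux/mm.h>
    #include <linux/pagemap.h>
    #include <linux/highmem.h>
    #include <linux/err.h>
    #include <linux/types.h>

    /* Sketch: read one page of a file's contents through the page cache. */
    static int peek_first_byte(struct address_space *mapping, u8 *out)
    {
            struct page *page;
            u8 *kaddr;

            page = read_mapping_page(mapping, 0, NULL);     /* ->readpage as filler */
            if (IS_ERR(page))
                    return PTR_ERR(page);

            kaddr = kmap(page);
            *out = kaddr[0];
            kunmap(page);
            put_page(page);         /* drop the reference read_mapping_page() took */
            return 0;
    }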
3316 struct page **pagep, void **fsdata) in pagecache_write_begin()
3327 struct page *page, void *fsdata) in pagecache_write_end() argument
3331 return aops->write_end(file, mapping, pos, len, copied, page, fsdata); in pagecache_write_end()
3442 struct page *grab_cache_page_write_begin(struct address_space *mapping, in grab_cache_page_write_begin()
3445 struct page *page; in grab_cache_page_write_begin() local
3451 page = pagecache_get_page(mapping, index, fgp_flags, in grab_cache_page_write_begin()
3453 if (page) in grab_cache_page_write_begin()
3454 wait_for_stable_page(page); in grab_cache_page_write_begin()
3456 return page; in grab_cache_page_write_begin()
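grab_cache_page_write_begin() wraps pagecache_get_page() with the flags a ->write_begin() implementation wants (locked, created if absent) and then waits for the page to be stable. A sketch of a minimal ->write_begin() built on it, in the spirit of the kernel's simple_write_begin(); "myfs" is a placeholder and the zeroing of not-uptodate pages is deliberately elided:

    #include <linux/fs.h>
    #include <linux/pagemap.h>

    /* Sketch: hand generic_perform_write() a locked page to copy into. */
    static int myfs_write_begin(struct file *file, struct address_space *mapping,
                                loff_t pos, unsigned len, unsigned flags,
                                struct page **pagep, void **fsdata)
    {
            pgoff_t index = pos >> PAGE_SHIFT;
            struct page *page;

            page = grab_cache_page_write_begin(mapping, index, flags);
            if (!page)
                    return -ENOMEM;

            *pagep = page;          /* returned locked, with an extra reference */
            return 0;
    }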
3470 struct page *page; in generic_perform_write() local
3502 &page, &fsdata); in generic_perform_write()
3507 flush_dcache_page(page); in generic_perform_write()
3509 copied = iov_iter_copy_from_user_atomic(page, i, offset, bytes); in generic_perform_write()
3510 flush_dcache_page(page); in generic_perform_write()
3513 page, fsdata); in generic_perform_write()
3688 int try_to_release_page(struct page *page, gfp_t gfp_mask) in try_to_release_page() argument
3690 struct address_space * const mapping = page->mapping; in try_to_release_page()
3692 BUG_ON(!PageLocked(page)); in try_to_release_page()
3693 if (PageWriteback(page)) in try_to_release_page()
3697 return mapping->a_ops->releasepage(page, gfp_mask); in try_to_release_page()
3698 return try_to_free_buffers(page); in try_to_release_page()