Lines matching refs: xas (struct xa_state usage in mm/filemap.c). Each match shows the file line number, the source line, and the enclosing function; a short sketch of each recurring xas idiom follows its group of matches.
128 XA_STATE(xas, &mapping->i_pages, page->index); in page_cache_delete()
131 mapping_set_update(&xas, mapping); in page_cache_delete()
135 xas_set_order(&xas, page->index, compound_order(page)); in page_cache_delete()
143 xas_store(&xas, shadow); in page_cache_delete()
144 xas_init_marks(&xas); in page_cache_delete()
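The page_cache_delete() matches above show the XArray deletion idiom: declare an XA_STATE at the page's index, widen it with xas_set_order() for a compound page, store a shadow entry (or NULL), and clear the search marks. A minimal sketch of that idiom follows; my_delete() and its argument list are illustrative, not the kernel's signature, and the caller is assumed to hold the xa_lock. All sketches below assume <linux/xarray.h>; the reference-taking ones additionally need <linux/pagemap.h>.

#include <linux/xarray.h>

/* Hypothetical helper mirroring the page_cache_delete() pattern.
 * Caller must hold the xarray's xa_lock (the i_pages lock here). */
static void my_delete(struct xarray *pages, unsigned long index,
		      unsigned int order, void *shadow)
{
	XA_STATE(xas, pages, index);

	if (order)
		xas_set_order(&xas, index, order); /* cover all 2^order slots */
	xas_store(&xas, shadow);	/* NULL, or an xa_value shadow entry */
	xas_init_marks(&xas);		/* drop dirty/writeback marks */
}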
303 XA_STATE(xas, &mapping->i_pages, pvec->pages[0]->index); in page_cache_delete_batch()
308 mapping_set_update(&xas, mapping); in page_cache_delete_batch()
309 xas_for_each(&xas, page, ULONG_MAX) { in page_cache_delete_batch()
331 if (page->index == xas.xa_index) in page_cache_delete_batch()
340 if (page->index + compound_nr(page) - 1 == xas.xa_index) in page_cache_delete_batch()
342 xas_store(&xas, NULL); in page_cache_delete_batch()
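page_cache_delete_batch() extends the same idea to a range: xas_for_each() walks every present entry up to ULONG_MAX and xas_store(&xas, NULL) clears each slot in turn. A hedged sketch of the loop skeleton, with a hypothetical my_delete_range() and without the batch's tail-page accounting:

/* Hypothetical range delete; caller holds the xa_lock. */
static void my_delete_range(struct xarray *pages, unsigned long start,
			    unsigned long end)
{
	XA_STATE(xas, pages, start);
	void *entry;

	xas_for_each(&xas, entry, end) {
		if (xas_retry(&xas, entry))
			continue;
		xas_store(&xas, NULL);	/* cursor already points at the slot */
	}
}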
482 XA_STATE(xas, &mapping->i_pages, start_byte >> PAGE_SHIFT); in filemap_range_has_page()
490 page = xas_find(&xas, max); in filemap_range_has_page()
491 if (xas_retry(&xas, page)) in filemap_range_has_page()
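filemap_range_has_page() demonstrates the lockless probe: under rcu_read_lock(), xas_find() returns the first entry at or after the start index, and xas_retry() restarts the walk if a racing modification left a retry entry behind. Roughly (my_range_has_entry() is illustrative):

/* Hypothetical lockless probe for any non-shadow entry in a range. */
static bool my_range_has_entry(struct xarray *pages, unsigned long start,
			       unsigned long max)
{
	XA_STATE(xas, pages, start);
	void *entry;

	rcu_read_lock();
	for (;;) {
		entry = xas_find(&xas, max);
		if (xas_retry(&xas, entry))
			continue;	/* raced with a store; look again */
		if (xa_is_value(entry))
			entry = NULL;	/* shadow entries don't count */
		break;
	}
	rcu_read_unlock();
	return entry != NULL;
}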
800 XA_STATE(xas, &mapping->i_pages, offset); in replace_page_cache_page()
813 xas_lock_irqsave(&xas, flags); in replace_page_cache_page()
814 xas_store(&xas, new); in replace_page_cache_page()
826 xas_unlock_irqrestore(&xas, flags); in replace_page_cache_page()
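replace_page_cache_page() is the simplest locked store: take the irq-safe xa_lock, xas_store() the new page over the old index, and drop the lock. xas_store() returns the previous entry, which the kernel uses for accounting. Sketch (my_replace() is hypothetical):

/* Hypothetical single-slot replace under the irq-safe xa_lock. */
static void *my_replace(struct xarray *pages, unsigned long index, void *new)
{
	XA_STATE(xas, pages, index);
	unsigned long flags;
	void *old;

	xas_lock_irqsave(&xas, flags);
	old = xas_store(&xas, new);	/* returns the displaced entry */
	xas_unlock_irqrestore(&xas, flags);
	return old;
}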
840 XA_STATE(xas, &mapping->i_pages, offset); in __add_to_page_cache_locked()
847 mapping_set_update(&xas, mapping); in __add_to_page_cache_locked()
863 unsigned int order = xa_get_order(xas.xa, xas.xa_index); in __add_to_page_cache_locked()
867 xas_split_alloc(&xas, xa_load(xas.xa, xas.xa_index), in __add_to_page_cache_locked()
869 xas_lock_irq(&xas); in __add_to_page_cache_locked()
870 xas_for_each_conflict(&xas, entry) { in __add_to_page_cache_locked()
873 xas_set_err(&xas, -EEXIST); in __add_to_page_cache_locked()
882 order = xa_get_order(xas.xa, xas.xa_index); in __add_to_page_cache_locked()
884 xas_split(&xas, old, order); in __add_to_page_cache_locked()
885 xas_reset(&xas); in __add_to_page_cache_locked()
889 xas_store(&xas, page); in __add_to_page_cache_locked()
890 if (xas_error(&xas)) in __add_to_page_cache_locked()
901 xas_unlock_irq(&xas); in __add_to_page_cache_locked()
902 } while (xas_nomem(&xas, gfp)); in __add_to_page_cache_locked()
904 if (xas_error(&xas)) { in __add_to_page_cache_locked()
905 error = xas_error(&xas); in __add_to_page_cache_locked()
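__add_to_page_cache_locked() is the canonical XArray insertion loop: everything between xas_lock_irq() and xas_unlock_irq() may fail with -ENOMEM, in which case xas_nomem() allocates a node outside the lock and the do/while retries. xas_for_each_conflict() visits any existing entries the store would overwrite, so a real page in the slot can be reported as -EEXIST; the xa_get_order()/xas_split_alloc()/xas_split() calls, which split a multi-order shadow entry first, are omitted below. A hedged sketch of the core idiom (my_insert() is illustrative):

/* Hypothetical insert showing the xas_nomem() retry idiom. */
static int my_insert(struct xarray *pages, unsigned long index,
		     void *item, gfp_t gfp)
{
	XA_STATE(xas, pages, index);
	void *entry;

	do {
		xas_lock_irq(&xas);
		xas_for_each_conflict(&xas, entry) {
			if (!xa_is_value(entry)) {
				/* a real page already occupies the slot */
				xas_set_err(&xas, -EEXIST);
				break;
			}
		}
		if (!xas_error(&xas))
			xas_store(&xas, item);	/* may set -ENOMEM */
		xas_unlock_irq(&xas);
	} while (xas_nomem(&xas, gfp));	/* allocate and retry on -ENOMEM */

	return xas_error(&xas);
}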
1632 XA_STATE(xas, &mapping->i_pages, index); in page_cache_next_miss()
1635 void *entry = xas_next(&xas); in page_cache_next_miss()
1638 if (xas.xa_index == 0) in page_cache_next_miss()
1642 return xas.xa_index; in page_cache_next_miss()
1668 XA_STATE(xas, &mapping->i_pages, index); in page_cache_prev_miss()
1671 void *entry = xas_prev(&xas); in page_cache_prev_miss()
1674 if (xas.xa_index == ULONG_MAX) in page_cache_prev_miss()
1678 return xas.xa_index; in page_cache_prev_miss()
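page_cache_next_miss() and page_cache_prev_miss() walk the cursor one slot at a time with xas_next()/xas_prev() until they find a hole; xas.xa_index then names the gap. Wrapping past 0 (or past ULONG_MAX when walking backwards) signals that no gap was found in range. A sketch of the forward walk, mirroring the matches above (my_next_miss() is a hypothetical wrapper):

/* Hypothetical forward hole search; 'max_scan' bounds the walk.
 * Caller must hold rcu_read_lock() or the xa_lock. */
static unsigned long my_next_miss(struct xarray *pages, unsigned long index,
				  unsigned long max_scan)
{
	XA_STATE(xas, pages, index);

	while (max_scan--) {
		void *entry = xas_next(&xas);
		if (!entry || xa_is_value(entry))
			break;		/* hole (or shadow entry) found */
		if (xas.xa_index == 0)
			break;		/* wrapped: no hole in range */
	}
	return xas.xa_index;
}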
1697 XA_STATE(xas, &mapping->i_pages, index); in find_get_entry()
1702 xas_reset(&xas); in find_get_entry()
1703 page = xas_load(&xas); in find_get_entry()
1704 if (xas_retry(&xas, page)) in find_get_entry()
1721 if (unlikely(page != xas_reload(&xas))) { in find_get_entry()
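find_get_entry() shows the RCU lookup handshake that all the lookup functions below repeat: xas_load() the entry, xas_retry() on a retry marker, take a speculative reference, then xas_reload() to confirm the slot still holds the same page; otherwise drop the reference and start over. A sketch under the pre-folio API this listing reflects (my_get_entry() is hypothetical; page_cache_get_speculative() and put_page() are the real helpers):

#include <linux/pagemap.h>
#include <linux/xarray.h>

/* Hypothetical lockless single-slot lookup. */
static struct page *my_get_entry(struct xarray *pages, unsigned long index)
{
	XA_STATE(xas, pages, index);
	struct page *page;

	rcu_read_lock();
repeat:
	xas_reset(&xas);
	page = xas_load(&xas);
	if (xas_retry(&xas, page))
		goto repeat;
	if (!page || xa_is_value(page))
		goto out;		/* hole or shadow entry */
	if (!page_cache_get_speculative(page))
		goto repeat;		/* page was being freed */
	if (unlikely(page != xas_reload(&xas))) {
		put_page(page);		/* slot changed under us */
		goto repeat;
	}
out:
	rcu_read_unlock();
	return page;
}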
1910 XA_STATE(xas, &mapping->i_pages, start); in find_get_entries()
1918 xas_for_each(&xas, page, ULONG_MAX) { in find_get_entries()
1919 if (xas_retry(&xas, page)) in find_get_entries()
1933 if (unlikely(page != xas_reload(&xas))) in find_get_entries()
1941 page = find_subpage(page, xas.xa_index); in find_get_entries()
1945 indices[ret] = xas.xa_index; in find_get_entries()
1953 xas_reset(&xas); in find_get_entries()
1984 XA_STATE(xas, &mapping->i_pages, *start); in find_get_pages_range()
1992 xas_for_each(&xas, page, end) { in find_get_pages_range()
1993 if (xas_retry(&xas, page)) in find_get_pages_range()
2003 if (unlikely(page != xas_reload(&xas))) in find_get_pages_range()
2006 pages[ret] = find_subpage(page, xas.xa_index); in find_get_pages_range()
2008 *start = xas.xa_index + 1; in find_get_pages_range()
2015 xas_reset(&xas); in find_get_pages_range()
2049 XA_STATE(xas, &mapping->i_pages, index); in find_get_pages_contig()
2057 for (page = xas_load(&xas); page; page = xas_next(&xas)) { in find_get_pages_contig()
2058 if (xas_retry(&xas, page)) in find_get_pages_contig()
2071 if (unlikely(page != xas_reload(&xas))) in find_get_pages_contig()
2074 pages[ret] = find_subpage(page, xas.xa_index); in find_get_pages_contig()
2081 xas_reset(&xas); in find_get_pages_contig()
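find_get_entries(), find_get_pages_range() and find_get_pages_contig() wrap that same handshake in a gang-lookup loop: xas_for_each() (or xas_load()/xas_next() for the contiguous variant) iterates under RCU, and on any reference or reload failure xas_reset() rewinds the cursor so the slot is retried rather than skipped. Sketch of the shared skeleton (my_get_pages() is hypothetical):

/* Hypothetical gang lookup with the loop shape of the functions above. */
static unsigned int my_get_pages(struct xarray *pages, unsigned long *start,
				 unsigned long end, unsigned int nr,
				 struct page **results)
{
	XA_STATE(xas, pages, *start);
	struct page *page;
	unsigned int ret = 0;

	rcu_read_lock();
	xas_for_each(&xas, page, end) {
		if (xas_retry(&xas, page))
			continue;
		if (xa_is_value(page))
			continue;	/* skip shadow entries */
		if (!page_cache_get_speculative(page))
			goto retry;
		if (unlikely(page != xas_reload(&xas)))
			goto put;
		results[ret] = page;
		if (++ret == nr) {
			*start = xas.xa_index + 1;	/* resume point */
			break;
		}
		continue;
put:
		put_page(page);
retry:
		xas_reset(&xas);	/* re-examine this slot */
	}
	rcu_read_unlock();
	return ret;
}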
2106 XA_STATE(xas, &mapping->i_pages, *index); in find_get_pages_range_tag()
2114 xas_for_each_marked(&xas, page, end, tag) { in find_get_pages_range_tag()
2115 if (xas_retry(&xas, page)) in find_get_pages_range_tag()
2129 if (unlikely(page != xas_reload(&xas))) in find_get_pages_range_tag()
2132 pages[ret] = find_subpage(page, xas.xa_index); in find_get_pages_range_tag()
2134 *index = xas.xa_index + 1; in find_get_pages_range_tag()
2141 xas_reset(&xas); in find_get_pages_range_tag()
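find_get_pages_range_tag() is the same loop driven by xas_for_each_marked(), which only visits entries carrying the requested search mark (PAGECACHE_TAG_DIRTY, for example); shadow entries are never tagged, so the xa_is_value() check drops out. Only the loop header of the sketch above changes:

	xas_for_each_marked(&xas, page, end, PAGECACHE_TAG_DIRTY) {
		/* ... same retry/ref/reload/reset body as my_get_pages() ... */
	}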
2941 struct xa_state *xas, pgoff_t end_pgoff) in next_uptodate_page() argument
2948 if (xas_retry(xas, page)) in next_uptodate_page()
2957 if (unlikely(page != xas_reload(xas))) in next_uptodate_page()
2970 if (xas->xa_index >= max_idx) in next_uptodate_page()
2977 } while ((page = xas_next_entry(xas, end_pgoff)) != NULL); in next_uptodate_page()
2983 struct xa_state *xas, in first_map_page() argument
2986 return next_uptodate_page(xas_find(xas, end_pgoff), in first_map_page()
2987 mapping, xas, end_pgoff); in first_map_page()
2991 struct xa_state *xas, in next_map_page() argument
2994 return next_uptodate_page(xas_next_entry(xas, end_pgoff), in next_map_page()
2995 mapping, xas, end_pgoff); in next_map_page()
3014 XA_STATE(xas, &mapping->i_pages, start_pgoff); in filemap_map_pages()
3021 head = first_map_page(mapping, &xas, end_pgoff); in filemap_map_pages()
3039 page = find_subpage(head, xas.xa_index); in filemap_map_pages()
3046 addr += (xas.xa_index - last_pgoff) << PAGE_SHIFT; in filemap_map_pages()
3047 vmf->pte += xas.xa_index - last_pgoff; in filemap_map_pages()
3048 last_pgoff = xas.xa_index; in filemap_map_pages()
3065 } while ((head = next_map_page(mapping, &xas, end_pgoff)) != NULL); in filemap_map_pages()
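filemap_map_pages() drives the fault-around walk through the first_map_page()/next_map_page() helpers above: xas_find() positions the cursor on the first present entry at or after start_pgoff, next_uptodate_page() filters and pins it, and xas_next_entry() steps to each later entry up to end_pgoff without restarting from the root; xas.xa_index supplies the page offset used to advance the PTE pointer. A stripped-down sketch of the walk shape (my_walk_range() is hypothetical; the uptodate, locking and refcount filters are omitted):

/* Hypothetical walk mirroring first_map_page()/next_map_page(). */
static void my_walk_range(struct xarray *pages, unsigned long start,
			  unsigned long end)
{
	XA_STATE(xas, pages, start);
	struct page *page;

	rcu_read_lock();
	for (page = xas_find(&xas, end); page;
	     page = xas_next_entry(&xas, end)) {
		if (xas_retry(&xas, page))
			continue;
		/* xas.xa_index is the pgoff the fault code maps here */
		pr_info("entry at pgoff %lu\n", xas.xa_index);
	}
	rcu_read_unlock();
}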