1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun
3*4882a593Smuzhiyun #include <linux/pagemap.h>
4*4882a593Smuzhiyun #include <linux/xarray.h>
5*4882a593Smuzhiyun #include <linux/slab.h>
6*4882a593Smuzhiyun #include <linux/swap.h>
7*4882a593Smuzhiyun #include <linux/swapops.h>
8*4882a593Smuzhiyun #include <asm/mte.h>
9*4882a593Smuzhiyun
10*4882a593Smuzhiyun static DEFINE_XARRAY(mte_pages);
11*4882a593Smuzhiyun
mte_allocate_tag_storage(void)12*4882a593Smuzhiyun void *mte_allocate_tag_storage(void)
13*4882a593Smuzhiyun {
14*4882a593Smuzhiyun /* tags granule is 16 bytes, 2 tags stored per byte */
15*4882a593Smuzhiyun return kmalloc(PAGE_SIZE / 16 / 2, GFP_KERNEL);
16*4882a593Smuzhiyun }
17*4882a593Smuzhiyun
/*
 * Free a tag buffer previously returned by mte_allocate_tag_storage().
 * @storage may be NULL (kfree(NULL) is a no-op).
 */
void mte_free_tag_storage(char *storage)
{
	kfree(storage);
}
22*4882a593Smuzhiyun
/*
 * Save the MTE tags of @page into the mte_pages xarray before the page is
 * swapped out, keyed by the page's swap entry value.
 *
 * Returns 0 on success (including the case where the page carries no tags),
 * -ENOMEM if a tag buffer could not be allocated, or the xarray error code
 * if the store itself failed.
 */
int mte_save_tags(struct page *page)
{
	void *tag_storage, *ret;

	/* Pages never mapped with PROT_MTE have no tags to preserve. */
	if (!test_bit(PG_mte_tagged, &page->flags))
		return 0;

	tag_storage = mte_allocate_tag_storage();
	if (!tag_storage)
		return -ENOMEM;

	mte_save_page_tags(page_address(page), tag_storage);

	/* page_private contains the swap entry.val set in do_swap_page */
	ret = xa_store(&mte_pages, page_private(page), tag_storage, GFP_KERNEL);
	if (WARN(xa_is_err(ret), "Failed to store MTE tags")) {
		/* Store failed: the new buffer was never published, free it. */
		mte_free_tag_storage(tag_storage);
		return xa_err(ret);
	} else if (ret) {
		/* Entry is being replaced, free the old entry */
		mte_free_tag_storage(ret);
	}

	return 0;
}
48*4882a593Smuzhiyun
/*
 * Restore the MTE tags saved for @entry onto @page during swap-in.
 *
 * Returns true if saved tags were found and written back to the page,
 * false if no tags were stored for this swap entry. The stored buffer is
 * looked up but not erased here; see mte_invalidate_tags() for removal.
 */
bool mte_restore_tags(swp_entry_t entry, struct page *page)
{
	void *tags = xa_load(&mte_pages, entry.val);

	if (!tags)
		return false;

	page_kasan_tag_reset(page);
	/*
	 * We need smp_wmb() in between setting the flags and clearing the
	 * tags because if another thread reads page->flags and builds a
	 * tagged address out of it, there is an actual dependency to the
	 * memory access, but on the current thread we do not guarantee that
	 * the new page->flags are visible before the tags were updated.
	 */
	smp_wmb();
	mte_restore_page_tags(page_address(page), tags);

	return true;
}
69*4882a593Smuzhiyun
/*
 * Discard any saved MTE tags for a single swap slot, freeing the
 * associated tag buffer if one was stored.
 */
void mte_invalidate_tags(int type, pgoff_t offset)
{
	void *tags;

	tags = xa_erase(&mte_pages, swp_entry(type, offset).val);
	mte_free_tag_storage(tags);
}
77*4882a593Smuzhiyun
/*
 * Discard all saved MTE tags belonging to swap device @type, e.g. on
 * swapoff. Walks every entry whose key falls in [swp_entry(type, 0),
 * swp_entry(type + 1, 0)) and frees its tag buffer.
 */
void mte_invalidate_tags_area(int type)
{
	swp_entry_t entry = swp_entry(type, 0);
	/* First entry of the next swap device; exclusive upper bound. */
	swp_entry_t last_entry = swp_entry(type + 1, 0);
	void *tags;

	XA_STATE(xa_state, &mte_pages, entry.val);

	/* Hold the xarray lock so erase-while-iterating stays consistent. */
	xa_lock(&mte_pages);
	xas_for_each(&xa_state, tags, last_entry.val - 1) {
		__xa_erase(&mte_pages, xa_state.xa_index);
		mte_free_tag_storage(tags);
	}
	xa_unlock(&mte_pages);
}
93