1 /* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
2 /*
3 *
4 * (C) COPYRIGHT 2012-2014, 2016-2018, 2020-2022 ARM Limited. All rights reserved.
5 *
6 * This program is free software and is provided to you under the terms of the
7 * GNU General Public License version 2 as published by the Free Software
8 * Foundation, and any use by you of this program is subject to the terms
9 * of such GNU license.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, you can access it online at
18 * http://www.gnu.org/licenses/gpl-2.0.html.
19 *
20 */
21
22 #ifndef _KBASE_MEM_LOWLEVEL_H
23 #define _KBASE_MEM_LOWLEVEL_H
24
25 #ifndef _KBASE_H_
26 #error "Don't include this file directly, use mali_kbase.h instead"
27 #endif
28
29 #include <linux/dma-mapping.h>
30
/* Flags for kbase_phy_allocator_pages_alloc */
#define KBASE_PHY_PAGES_FLAG_DEFAULT (0)      /* Default allocation flag */
#define KBASE_PHY_PAGES_FLAG_CLEAR   (1 << 0) /* Clear the pages after allocation */
#define KBASE_PHY_PAGES_FLAG_POISON  (1 << 1) /* Fill the memory with a poison value */

/* Mask of all allocation flags accepted by the allocator */
#define KBASE_PHY_PAGES_SUPPORTED_FLAGS (KBASE_PHY_PAGES_FLAG_DEFAULT|KBASE_PHY_PAGES_FLAG_CLEAR|KBASE_PHY_PAGES_FLAG_POISON)

/* Value to fill the memory with when KBASE_PHY_PAGES_FLAG_POISON is set */
#define KBASE_PHY_PAGES_POISON_VALUE 0xFD
39
/**
 * enum kbase_sync_type - Direction of a CPU/device cache sync operation.
 * @KBASE_SYNC_TO_CPU:    sync so the CPU sees up-to-date memory contents
 * @KBASE_SYNC_TO_DEVICE: sync so the device sees up-to-date memory contents
 *
 * NOTE(review): presumably maps onto the DMA sync directions of
 * <linux/dma-mapping.h> (included above) — confirm against the callers.
 */
enum kbase_sync_type {
	KBASE_SYNC_TO_CPU,
	KBASE_SYNC_TO_DEVICE
};
44
45 struct tagged_addr { phys_addr_t tagged_addr; };
46
/* Tag values stored in the low order bits of a struct tagged_addr */
#define HUGE_PAGE    (1u << 0)	/* page belongs to an unsplit 2MB large page */
#define HUGE_HEAD    (1u << 1)	/* page is the first 4KB page of a large page */
#define FROM_PARTIAL (1u << 2)	/* page taken from a large page that was split */

/* Number of 4KB pages contained in one 2MB large page */
#define NUM_4K_PAGES_IN_2MB_PAGE (SZ_2M / SZ_4K)
52
/*
 * If the macro for converting a physical address to its struct page is not
 * defined by the kernel itself, define it here. This avoids build errors
 * reported on some architectures that lack the definition.
 */
#ifndef phys_to_page
#define phys_to_page(phys) (pfn_to_page((phys) >> PAGE_SHIFT))
#endif
61
62 /**
63 * as_phys_addr_t - Retrieve the physical address from tagged address by
64 * masking the lower order 12 bits.
65 * @t: tagged address to be translated.
66 *
67 * Return: physical address corresponding to tagged address.
68 */
as_phys_addr_t(struct tagged_addr t)69 static inline phys_addr_t as_phys_addr_t(struct tagged_addr t)
70 {
71 return t.tagged_addr & PAGE_MASK;
72 }
73
74 /**
75 * as_page - Retrieve the struct page from a tagged address
76 * @t: tagged address to be translated.
77 *
78 * Return: pointer to struct page corresponding to tagged address.
79 */
as_page(struct tagged_addr t)80 static inline struct page *as_page(struct tagged_addr t)
81 {
82 return phys_to_page(as_phys_addr_t(t));
83 }
84
85 /**
86 * as_tagged - Convert the physical address to tagged address type though
87 * there is no tag info present, the lower order 12 bits will be 0
88 * @phys: physical address to be converted to tagged type
89 *
90 * This is used for 4KB physical pages allocated by the Driver or imported pages
91 * and is needed as physical pages tracking object stores the reference for
92 * physical pages using tagged address type in lieu of the type generally used
93 * for physical addresses.
94 *
95 * Return: address of tagged address type.
96 */
as_tagged(phys_addr_t phys)97 static inline struct tagged_addr as_tagged(phys_addr_t phys)
98 {
99 struct tagged_addr t;
100
101 t.tagged_addr = phys & PAGE_MASK;
102 return t;
103 }
104
105 /**
106 * as_tagged_tag - Form the tagged address by storing the tag or metadata in the
107 * lower order 12 bits of physial address
108 * @phys: physical address to be converted to tagged address
109 * @tag: tag to be stored along with the physical address.
110 *
111 * The tag info is used while freeing up the pages
112 *
113 * Return: tagged address storing physical address & tag.
114 */
as_tagged_tag(phys_addr_t phys,int tag)115 static inline struct tagged_addr as_tagged_tag(phys_addr_t phys, int tag)
116 {
117 struct tagged_addr t;
118
119 t.tagged_addr = (phys & PAGE_MASK) | (tag & ~PAGE_MASK);
120 return t;
121 }
122
123 /**
124 * is_huge - Check if the physical page is one of the 512 4KB pages of the
125 * large page which was not split to be used partially
126 * @t: tagged address storing the tag in the lower order bits.
127 *
128 * Return: true if page belongs to large page, or false
129 */
is_huge(struct tagged_addr t)130 static inline bool is_huge(struct tagged_addr t)
131 {
132 return t.tagged_addr & HUGE_PAGE;
133 }
134
135 /**
136 * is_huge_head - Check if the physical page is the first 4KB page of the
137 * 512 4KB pages within a large page which was not split
138 * to be used partially
139 * @t: tagged address storing the tag in the lower order bits.
140 *
141 * Return: true if page is the first page of a large page, or false
142 */
is_huge_head(struct tagged_addr t)143 static inline bool is_huge_head(struct tagged_addr t)
144 {
145 int mask = HUGE_HEAD | HUGE_PAGE;
146
147 return mask == (t.tagged_addr & mask);
148 }
149
150 /**
151 * is_partial - Check if the physical page is one of the 512 pages of the
152 * large page which was split in 4KB pages to be used
153 * partially for allocations >= 2 MB in size.
154 * @t: tagged address storing the tag in the lower order bits.
155 *
156 * Return: true if page was taken from large page used partially, or false
157 */
is_partial(struct tagged_addr t)158 static inline bool is_partial(struct tagged_addr t)
159 {
160 return t.tagged_addr & FROM_PARTIAL;
161 }
162
163 /**
164 * index_in_large_page() - Get index of a 4KB page within a 2MB page which
165 * wasn't split to be used partially.
166 *
167 * @t: Tagged physical address of the physical 4KB page that lies within
168 * the large (or 2 MB) physical page.
169 *
170 * Return: Index of the 4KB page within a 2MB page
171 */
index_in_large_page(struct tagged_addr t)172 static inline unsigned int index_in_large_page(struct tagged_addr t)
173 {
174 WARN_ON(!is_huge(t));
175
176 return (PFN_DOWN(as_phys_addr_t(t)) & (NUM_4K_PAGES_IN_2MB_PAGE - 1));
177 }
178
#endif /* _KBASE_MEM_LOWLEVEL_H */
180