// SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
/*
 *
 * (C) COPYRIGHT 2019-2021 ARM Limited. All rights reserved.
 *
 * This program is free software and is provided to you under the terms of the
 * GNU General Public License version 2 as published by the Free Software
 * Foundation, and any use by you of this program is subject to the terms
 * of such GNU license.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 */

#include "mali_kbase_csf_protected_memory.h"
#include <linux/protected_memory_allocator.h>

#if IS_ENABLED(CONFIG_OF)
#include <linux/of_platform.h>
#endif

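/*
 * Look up the protected memory allocator (PMA) device through the
 * "protected-memory-allocator" devicetree phandle. Absence of the phandle is
 * not treated as an error; if the phandle exists but the PMA driver has not
 * produced an allocator yet, probing is deferred with -EPROBE_DEFER.
 */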
int kbase_csf_protected_memory_init(struct kbase_device *const kbdev)
{
	int err = 0;

#if IS_ENABLED(CONFIG_OF)
	struct device_node *pma_node = of_parse_phandle(kbdev->dev->of_node,
					"protected-memory-allocator", 0);
	if (!pma_node) {
		dev_info(kbdev->dev, "Protected memory allocator not available\n");
	} else {
		struct platform_device *const pdev =
				of_find_device_by_node(pma_node);

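		/*
		 * The PMA driver publishes its allocator handle via the
		 * platform device's drvdata; if it is not set yet, the PMA
		 * driver has not finished probing, so defer our own probe.
		 */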
		kbdev->csf.pma_dev = NULL;
		if (!pdev) {
			dev_err(kbdev->dev, "Platform device for Protected memory allocator not found\n");
		} else {
			kbdev->csf.pma_dev = platform_get_drvdata(pdev);
			if (!kbdev->csf.pma_dev) {
				dev_info(kbdev->dev, "Protected memory allocator is not ready\n");
				err = -EPROBE_DEFER;
			} else if (!try_module_get(kbdev->csf.pma_dev->owner)) {
				dev_err(kbdev->dev, "Failed to get Protected memory allocator module\n");
				err = -ENODEV;
			} else {
				dev_info(kbdev->dev, "Protected memory allocator successfully loaded\n");
			}
		}
		of_node_put(pma_node);
	}
#endif

	return err;
}

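/*
 * Drop the reference taken on the protected memory allocator module during
 * initialisation, if one was taken.
 */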
void kbase_csf_protected_memory_term(struct kbase_device *const kbdev)
{
	if (kbdev->csf.pma_dev)
		module_put(kbdev->csf.pma_dev->owner);
}

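/*
 * Allocate protected memory from the PMA, either as individual small (4KB)
 * pages or as 2MB allocations, and record the tagged physical addresses of
 * the resulting 4KB pages in the phys array. Returns an array with one PMA
 * handle per underlying allocation, or NULL on failure.
 */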
struct protected_memory_allocation **
kbase_csf_protected_memory_alloc(
		struct kbase_device *const kbdev,
		struct tagged_addr *phys,
		size_t num_pages,
		bool is_small_page)
{
	size_t i;
	struct protected_memory_allocator_device *pma_dev =
		kbdev->csf.pma_dev;
	struct protected_memory_allocation **pma = NULL;
	unsigned int order = KBASE_MEM_POOL_2MB_PAGE_TABLE_ORDER;
	unsigned int num_pages_order;

	if (is_small_page)
		order = KBASE_MEM_POOL_4KB_PAGE_TABLE_ORDER;

	num_pages_order = (1u << order);

	/* Convert the request, given in small (4KB) pages, into the number
	 * of order-sized allocations needed, rounding up.
	 *
	 * pma_alloc_page() then handles the granularity of each allocation
	 * based on the order.
	 */
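	/* For example, with 2MB allocations (num_pages_order == 512 for a
	 * 4KB PAGE_SIZE) a request for 513 small pages is rounded up to 2
	 * allocations, while with small-page allocations
	 * (num_pages_order == 1) the count is unchanged.
	 */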
	num_pages = div64_u64(num_pages + num_pages_order - 1, num_pages_order);

	if (WARN_ON(!pma_dev) || WARN_ON(!phys))
		return NULL;

	pma = kmalloc_array(num_pages, sizeof(*pma), GFP_KERNEL);
	if (!pma)
		return NULL;

	for (i = 0; i < num_pages; i++) {
		phys_addr_t phys_addr;

		pma[i] = pma_dev->ops.pma_alloc_page(pma_dev, order);
		if (!pma[i])
			break;

		phys_addr = pma_dev->ops.pma_get_phys_addr(pma_dev, pma[i]);

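		/*
		 * For a 2MB allocation, tag the first small page as the head
		 * of a huge page and the remaining 4KB sub-pages as huge-page
		 * members; small-page allocations are recorded untagged.
		 */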
		if (order) {
			size_t j;

			*phys++ = as_tagged_tag(phys_addr, HUGE_HEAD | HUGE_PAGE);

			for (j = 1; j < num_pages_order; j++) {
				*phys++ = as_tagged_tag(phys_addr +
							PAGE_SIZE * j,
							HUGE_PAGE);
			}
		} else {
			phys[i] = as_tagged(phys_addr);
		}
	}

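	/*
	 * If the loop stopped early, release the allocations made so far
	 * (converted back to a count of small pages) and report failure.
	 */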
	if (i != num_pages) {
		kbase_csf_protected_memory_free(kbdev, pma, i * num_pages_order, is_small_page);
		return NULL;
	}

	return pma;
}

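/*
 * Free protected memory previously obtained from
 * kbase_csf_protected_memory_alloc() and release the handle array. The
 * num_pages and is_small_page arguments must match the original allocation.
 */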
void kbase_csf_protected_memory_free(
		struct kbase_device *const kbdev,
		struct protected_memory_allocation **pma,
		size_t num_pages,
		bool is_small_page)
{
	size_t i;
	struct protected_memory_allocator_device *pma_dev =
		kbdev->csf.pma_dev;
	unsigned int num_pages_order = (1u << KBASE_MEM_POOL_2MB_PAGE_TABLE_ORDER);

	if (is_small_page)
		num_pages_order = (1u << KBASE_MEM_POOL_4KB_PAGE_TABLE_ORDER);

	if (WARN_ON(!pma_dev) || WARN_ON(!pma))
		return;

	/* Convert the count of small (4KB) pages back into the number of
	 * order-sized allocations that were made, rounding up, so that
	 * pma_free_page() is called once per allocation.
	 */
	num_pages = div64_u64(num_pages + num_pages_order - 1, num_pages_order);

	for (i = 0; i < num_pages; i++)
		pma_dev->ops.pma_free_page(pma_dev, pma[i]);

	kfree(pma);
}