/*
 * Copyright (c) 2025, MediaTek Inc. All rights reserved.
 *
 * SPDX-License-Identifier: BSD-3-Clause
 */

#include <assert.h>
#include <errno.h>

#include <common/debug.h>
#include <lib/mmio.h>
#include <lib/spinlock.h>

#include <drivers/apusys_rv_public.h>
#include <drivers/mminfra_public.h>
#include <lib/mtk_init/mtk_init.h>
#include <mtk_sip_svc.h>

#define TAG "[MTK_SMMU]"

#ifdef SMMU_DBG
#define SMMUDBG(fmt, args...) INFO(TAG fmt, ##args)
#else
#define SMMUDBG(fmt, args...) VERBOSE(TAG fmt, ##args)
#endif

#define SMMU_SUCCESS 0
#define SMMU_ID_ERR 1
#define SMMU_CMD_ERR 2

#define F_MSK_SHIFT(val, h, l) (((val) & GENMASK(h, l)) >> (l))

#define SMMU_SMC_ID_H (10)
#define SMMU_SMC_ID_L (8)
#define SMMU_SMC_CMD_H (7)
#define SMMU_SMC_CMD_L (0)
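/* x1 command word layout: smmu_id in bits [10:8], cmd_id in bits [7:0] */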

/* SMMU CMD definitions from Rich OS */
enum smc_cmd {
	SMMU_SECURE_PM_GET,
	SMMU_SECURE_PM_PUT,
	SMMU_CMD_NUM
};

enum smmu_id {
	MTK_SMMU_ID_MM,
	MTK_SMMU_ID_APU,
	MTK_SMMU_ID_SOC,
	MTK_SMMU_ID_GPU,
	MTK_SMMU_ID_NUM,
};

enum cmd_source {
	SMMU_CMD_SOURCE_KERNEL = 0,	/* Command comes from kernel */
	SMMU_CMD_SOURCE_TFA,
	SMMU_CMD_SOURCE_HYP,		/* Command comes from hypervisor */
	SMMU_CMD_SOURCE_NUM
};

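/* Per-SMMU software semaphore bookkeeping */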
struct hw_sema_t {
	enum smmu_id id;
	uint32_t vote[SMMU_CMD_SOURCE_NUM];	/* SW vote count */
	spinlock_t lock;
	bool active;
};

static struct hw_sema_t *hw_semas;

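/* Increment the caller's vote and return the new value; on overflow, log an error and return 0. */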
static inline uint32_t vote_count_inc(struct hw_sema_t *sema, enum cmd_source id)
{
	if (sema->vote[id] < UINT32_MAX) {
		sema->vote[id]++;
		return sema->vote[id];
	}

	ERROR(TAG "%s:id:%u:source_id:%u overflow\n", __func__, sema->id, id);
	return 0;
}

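/* Decrement the caller's vote and return the new value; on underflow, log an error and return 0. */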
static inline uint32_t vote_count_dec(struct hw_sema_t *sema, enum cmd_source id)
{
	if (sema->vote[id] > 0) {
		sema->vote[id]--;
		return sema->vote[id];
	}

	ERROR(TAG "%s:id:%u:source_id:%u underflow\n", __func__, sema->id, id);
	return 0;
}

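/* Sum the vote counts of all command sources for this SMMU. */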
static inline uint32_t vote_count(struct hw_sema_t *sema)
{
	uint32_t i, count = 0;

	for (i = 0; i < SMMU_CMD_SOURCE_NUM; i++)
		count += sema->vote[i];

	return count;
}

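/* Look up the hw_sema entry for the given SMMU id, or NULL if the table is absent or the id is invalid. */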
static struct hw_sema_t *mtk_smmu_get_hw_sema_cfg(enum smmu_id id)
{
	if (hw_semas == NULL) {
		ERROR(TAG "%s failed, hw_sema config not ready\n", __func__);
		return NULL;
	}

	if (id >= MTK_SMMU_ID_NUM) {
		ERROR(TAG "%s id:%u not support\n", __func__, id);
		return NULL;
	}
	return &hw_semas[id];
}

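/* Return SMMU_SUCCESS only if mminfra reports it is powered on and in use. */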
static int mm_pm_get_if_in_use(struct hw_sema_t *sema, enum cmd_source id)
{
	uint32_t count;
	int ret;

	ret = mminfra_get_if_in_use();
	if (ret != MMINFRA_RET_POWER_ON) {
		count = vote_count(sema);
		VERBOSE(TAG "%s:id:%u:source_id:%u:vote:%u:vote_count:%u ret:%d\n",
			__func__, sema->id, id, sema->vote[id], count, ret);
		return SMMU_CMD_ERR;
	}
	return SMMU_SUCCESS;
}

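/* Drop the mminfra power reference; return SMMU_CMD_ERR if the put fails. */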
static int mm_pm_put(struct hw_sema_t *sema, enum cmd_source id)
{
	uint32_t count;
	int ret;

	ret = mminfra_put();
	if (ret < 0) {
		count = vote_count(sema);
		VERBOSE(TAG "%s:id:%u:source_id:%u:vote:%u:vote_count:%u ret:%d\n",
			__func__, sema->id, id, sema->vote[id], count, ret);
		return SMMU_CMD_ERR;
	}
	return SMMU_SUCCESS;
}

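/*
 * Take the power/semaphore reference for the given SMMU on behalf of source_id.
 * Only the first overall vote acquires the underlying resource; later callers
 * just bump their vote count.
 */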
static int mtk_smmu_pm_get(enum smmu_id id, enum cmd_source source_id)
{
	struct hw_sema_t *hw_sema = mtk_smmu_get_hw_sema_cfg(id);
	uint32_t count;
	int ret = SMMU_SUCCESS;

	if (!hw_sema || !hw_sema->active)
		return 0; /* hw_sema not ready or not supported, bypass */

	spin_lock(&hw_sema->lock);
	count = vote_count(hw_sema);

	SMMUDBG("%s:id:%u:source_id:%u:vote:%u:vote_count:%u start\n",
		__func__, id, source_id, hw_sema->vote[source_id], count);

	if (count > 0) {
		/* hw_sem was already acquired */
		vote_count_inc(hw_sema, source_id);
		goto out;
	}

	if (id == MTK_SMMU_ID_APU) {
		ret = apusys_rv_iommu_hw_sem_trylock();
	} else if (id == MTK_SMMU_ID_MM) {
		ret = mm_pm_get_if_in_use(hw_sema, source_id);
	}

	if (ret == SMMU_SUCCESS)
		vote_count_inc(hw_sema, source_id);

out:
	count = vote_count(hw_sema);
	SMMUDBG("%s:id:%u:source_id:%u:vote:%u:vote_count:%u end ret:%d\n",
		__func__, id, source_id, hw_sema->vote[source_id], count, ret);

	spin_unlock(&hw_sema->lock);
	return ret;
}

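/*
 * Release the reference taken by mtk_smmu_pm_get(). The underlying resource
 * is only released once the last vote across all sources has been dropped.
 */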
static int mtk_smmu_pm_put(enum smmu_id id, enum cmd_source source_id)
{
	struct hw_sema_t *hw_sema = mtk_smmu_get_hw_sema_cfg(id);
	uint32_t count;
	int ret = SMMU_SUCCESS;

	if (!hw_sema || !hw_sema->active)
		return 0; /* hw_sema not ready or not supported, bypass */

	spin_lock(&hw_sema->lock);
	count = vote_count(hw_sema);

	SMMUDBG("%s:id:%u:source_id:%u:vote:%u:vote_count:%u start\n",
		__func__, id, source_id, hw_sema->vote[source_id], count);

	if (count == 0) {
		/* hw_sem was already released */
		ERROR(TAG "%s:id:%u, hw_sem already released\n", __func__, id);
		goto out;
	}

	if (hw_sema->vote[source_id] == 0) {
		/* this source has no outstanding vote */
		ERROR(TAG "%s:id:%u:source_id:%u, hw_sem already released\n",
		      __func__, id, source_id);
		goto out;
	}

	vote_count_dec(hw_sema, source_id);
	count = vote_count(hw_sema);
	if (count > 0)
		goto out; /* other votes remain, keep hw_sem held */

	if (id == MTK_SMMU_ID_APU) {
		ret = apusys_rv_iommu_hw_sem_unlock();
	} else if (id == MTK_SMMU_ID_MM) {
		ret = mm_pm_put(hw_sema, source_id);
	}
out:
	SMMUDBG("%s:id:%u:source_id:%u:vote:%u:vote_count:%u end ret:%d\n",
		__func__, id, source_id, hw_sema->vote[source_id], count, ret);

	spin_unlock(&hw_sema->lock);
	return ret;
}

/*
 * This function handles SMMU requests from the Rich OS.
 * x1: TF-A cmd (format: sec[11:11] + smmu_id[10:8] + cmd_id[7:0])
 * x2: other parameters (for PM commands, the cmd_source of the caller)
 */
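/*
 * Illustrative encoding only (derived from the field definitions above): a
 * kernel requesting a PM "get" on the MM SMMU would pass
 *	x1 = (MTK_SMMU_ID_MM << SMMU_SMC_ID_L) | SMMU_SECURE_PM_GET;
 *	x2 = SMMU_CMD_SOURCE_KERNEL;
 * with the MTK_SIP_IOMMU_CONTROL SMC function ID.
 */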
static u_register_t mtk_smmu_handler(u_register_t x1, u_register_t x2,
				     u_register_t x3, u_register_t x4,
				     void *handle, struct smccc_res *smccc_ret)
{
	uint32_t ret = SMMU_CMD_ERR;
	uint32_t cmd_id = F_MSK_SHIFT(x1, SMMU_SMC_CMD_H, SMMU_SMC_CMD_L);
	enum smmu_id smmu_id = F_MSK_SHIFT(x1, SMMU_SMC_ID_H, SMMU_SMC_ID_L);
	enum cmd_source source_id = (enum cmd_source)x2;

	if (smmu_id >= MTK_SMMU_ID_NUM || source_id >= SMMU_CMD_SOURCE_NUM)
		return SMMU_ID_ERR;

	switch (cmd_id) {
	case SMMU_SECURE_PM_GET:
		ret = mtk_smmu_pm_get(smmu_id, source_id);
		break;
	case SMMU_SECURE_PM_PUT:
		ret = mtk_smmu_pm_put(smmu_id, source_id);
		break;
	default:
		break;
	}

	return ret;
}

/* Register MTK SMMU service */
DECLARE_SMC_HANDLER(MTK_SIP_IOMMU_CONTROL, mtk_smmu_handler);

#if defined(MTK_SMMU_MT8196)
static struct hw_sema_t smmu_hw_semas[MTK_SMMU_ID_NUM] = {
	{
		.id = MTK_SMMU_ID_MM,
		.active = true,
	},
	{
		.id = MTK_SMMU_ID_APU,
		.active = true,
	},
	{
		.id = MTK_SMMU_ID_SOC,
		.active = false,
	},
	{
		.id = MTK_SMMU_ID_GPU,
		.active = true,
	},
};
#else
static struct hw_sema_t *smmu_hw_semas;
#endif
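/*
 * Without a platform table, smmu_hw_semas stays NULL and mtk_smmu_init()
 * below returns -ENODEV; PM get/put requests are then bypassed because no
 * hw_sema entry can be looked up.
 */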

/* Register MTK SMMU driver setup init function */
static int mtk_smmu_init(void)
{
	hw_semas = smmu_hw_semas;

	if (!hw_semas) {
		ERROR(TAG "%s failed, hw_sema table missing\n", __func__);
		return -ENODEV;
	}
	SMMUDBG("%s done.\n", __func__);
	return 0;
}
MTK_PLAT_SETUP_0_INIT(mtk_smmu_init);