// SPDX-License-Identifier: MIT
/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 */

#include <linux/errno.h>
#include <linux/io.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/device.h>
#include <linux/tee_drv.h>
#include <linux/types.h>
#include <linux/mm.h>
#include <linux/uaccess.h>
#include <linux/firmware.h>
#include "amdtee_private.h"
#include "../tee_private.h"
#include <linux/psp-tee.h>

static struct amdtee_driver_data *drv_data;
static DEFINE_MUTEX(session_list_mutex);

static void amdtee_get_version(struct tee_device *teedev,
                               struct tee_ioctl_version_data *vers)
{
        struct tee_ioctl_version_data v = {
                .impl_id = TEE_IMPL_ID_AMDTEE,
                .impl_caps = 0,
                .gen_caps = TEE_GEN_CAP_GP,
        };
        *vers = v;
}

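/* Allocate per-context driver data and initialize its session and shared-memory lists. */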
static int amdtee_open(struct tee_context *ctx)
{
        struct amdtee_context_data *ctxdata;

        ctxdata = kzalloc(sizeof(*ctxdata), GFP_KERNEL);
        if (!ctxdata)
                return -ENOMEM;

        INIT_LIST_HEAD(&ctxdata->sess_list);
        INIT_LIST_HEAD(&ctxdata->shm_list);
        mutex_init(&ctxdata->shm_mutex);

        ctx->data = ctxdata;
        return 0;
}

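/*
 * Close every session that is still open for this TA and drop the
 * corresponding TA load reference before freeing the session structure.
 */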
static void release_session(struct amdtee_session *sess)
{
        int i;

        /* Close any open session */
        for (i = 0; i < TEE_NUM_SESSIONS; ++i) {
                /* Check if session entry 'i' is valid */
                if (!test_bit(i, sess->sess_mask))
                        continue;

                handle_close_session(sess->ta_handle, sess->session_info[i]);
                handle_unload_ta(sess->ta_handle);
        }

        kfree(sess);
}

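/* Tear down a TEE context: release all of its sessions and free its private data. */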
static void amdtee_release(struct tee_context *ctx)
{
        struct amdtee_context_data *ctxdata = ctx->data;

        if (!ctxdata)
                return;

        while (true) {
                struct amdtee_session *sess;

                sess = list_first_entry_or_null(&ctxdata->sess_list,
                                                struct amdtee_session,
                                                list_node);

                if (!sess)
                        break;

                list_del(&sess->list_node);
                release_session(sess);
        }
        mutex_destroy(&ctxdata->shm_mutex);
        kfree(ctxdata);

        ctx->data = NULL;
}

/**
 * alloc_session() - Allocate a session structure
 * @ctxdata: TEE context data structure
 * @session: Session ID for which a 'struct amdtee_session' is to be allocated.
 *
 * Scans the TEE context's session list to check whether the TA is already
 * loaded into the TEE. If so, returns the existing 'session' structure for
 * that TA; otherwise allocates and initializes a new 'session' structure and
 * adds it to the context's session list.
 *
 * The caller must hold session_list_mutex.
 *
 * Returns:
 * 'struct amdtee_session *' on success and NULL on failure.
 */
static struct amdtee_session *alloc_session(struct amdtee_context_data *ctxdata,
                                            u32 session)
{
        struct amdtee_session *sess;
        u32 ta_handle = get_ta_handle(session);

        /* Scan session list to check if TA is already loaded into TEE */
        list_for_each_entry(sess, &ctxdata->sess_list, list_node)
                if (sess->ta_handle == ta_handle) {
                        kref_get(&sess->refcount);
                        return sess;
                }

        /* Allocate a new session and add to list */
        sess = kzalloc(sizeof(*sess), GFP_KERNEL);
        if (sess) {
                sess->ta_handle = ta_handle;
                kref_init(&sess->refcount);
                spin_lock_init(&sess->lock);
                list_add(&sess->list_node, &ctxdata->sess_list);
        }

        return sess;
}

/* Requires session_list_mutex to be held */
static struct amdtee_session *find_session(struct amdtee_context_data *ctxdata,
                                           u32 session)
{
        u32 ta_handle = get_ta_handle(session);
        u32 index = get_session_index(session);
        struct amdtee_session *sess;

        if (index >= TEE_NUM_SESSIONS)
                return NULL;

        list_for_each_entry(sess, &ctxdata->sess_list, list_node)
                if (ta_handle == sess->ta_handle &&
                    test_bit(index, sess->sess_mask))
                        return sess;

        return NULL;
}

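/*
 * Look up the TEE buffer ID that was assigned to a shared-memory buffer
 * when it was mapped; returns 0 if no matching mapping is found.
 */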
u32 get_buffer_id(struct tee_shm *shm)
{
        struct amdtee_context_data *ctxdata = shm->ctx->data;
        struct amdtee_shm_data *shmdata;
        u32 buf_id = 0;

        mutex_lock(&ctxdata->shm_mutex);
        list_for_each_entry(shmdata, &ctxdata->shm_list, shm_node)
                if (shmdata->kaddr == shm->kaddr) {
                        buf_id = shmdata->buf_id;
                        break;
                }
        mutex_unlock(&ctxdata->shm_mutex);

        return buf_id;
}

static DEFINE_MUTEX(drv_mutex);
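/*
 * Build the TA firmware file name from the client UUID, load the binary
 * via request_firmware() and copy it into page-aligned kernel memory that
 * can be handed to the TEE. The caller is responsible for freeing *ta.
 */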
static int copy_ta_binary(struct tee_context *ctx, void *ptr, void **ta,
                          size_t *ta_size)
{
        const struct firmware *fw;
        char fw_name[TA_PATH_MAX];
        struct {
                u32 lo;
                u16 mid;
                u16 hi_ver;
                u8 seq_n[8];
        } *uuid = ptr;
        int n, rc = 0;

        n = snprintf(fw_name, TA_PATH_MAX,
                     "%s/%08x-%04x-%04x-%02x%02x%02x%02x%02x%02x%02x%02x.bin",
                     TA_LOAD_PATH, uuid->lo, uuid->mid, uuid->hi_ver,
                     uuid->seq_n[0], uuid->seq_n[1],
                     uuid->seq_n[2], uuid->seq_n[3],
                     uuid->seq_n[4], uuid->seq_n[5],
                     uuid->seq_n[6], uuid->seq_n[7]);
        if (n < 0 || n >= TA_PATH_MAX) {
                pr_err("failed to get firmware name\n");
                return -EINVAL;
        }

        mutex_lock(&drv_mutex);
        n = request_firmware(&fw, fw_name, &ctx->teedev->dev);
        if (n) {
                pr_err("failed to load firmware %s\n", fw_name);
                rc = -ENOMEM;
                goto unlock;
        }

        *ta_size = roundup(fw->size, PAGE_SIZE);
        *ta = (void *)__get_free_pages(GFP_KERNEL, get_order(*ta_size));
        if (!*ta) {
                pr_err("%s: get_free_pages failed\n", __func__);
                rc = -ENOMEM;
                goto rel_fw;
        }

        memcpy(*ta, fw->data, fw->size);
rel_fw:
        release_firmware(fw);
unlock:
        mutex_unlock(&drv_mutex);
        return rc;
}

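/* kref release callback: unlink the session from its list and free it. */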
static void destroy_session(struct kref *ref)
{
        struct amdtee_session *sess = container_of(ref, struct amdtee_session,
                                                   refcount);

        mutex_lock(&session_list_mutex);
        list_del(&sess->list_node);
        mutex_unlock(&session_list_mutex);
        kfree(sess);
}

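/*
 * Open a session with a Trusted Application: copy the TA binary from the
 * filesystem, load it into the TEE, reserve a free session slot for the TA
 * and ask the TEE to open the session. On failure the TA load and the
 * session reference taken here are rolled back.
 */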
int amdtee_open_session(struct tee_context *ctx,
                        struct tee_ioctl_open_session_arg *arg,
                        struct tee_param *param)
{
        struct amdtee_context_data *ctxdata = ctx->data;
        struct amdtee_session *sess = NULL;
        u32 session_info, ta_handle;
        size_t ta_size;
        int rc, i;
        void *ta;

        if (arg->clnt_login != TEE_IOCTL_LOGIN_PUBLIC) {
                pr_err("unsupported client login method\n");
                return -EINVAL;
        }

        rc = copy_ta_binary(ctx, &arg->uuid[0], &ta, &ta_size);
        if (rc) {
                pr_err("failed to copy TA binary\n");
                return rc;
        }

        /* Load the TA binary into TEE environment */
        handle_load_ta(ta, ta_size, arg);
        if (arg->ret != TEEC_SUCCESS)
                goto out;

        ta_handle = get_ta_handle(arg->session);

        mutex_lock(&session_list_mutex);
        sess = alloc_session(ctxdata, arg->session);
        mutex_unlock(&session_list_mutex);

        if (!sess) {
                handle_unload_ta(ta_handle);
                rc = -ENOMEM;
                goto out;
        }

        /* Find an empty session index for the given TA */
        spin_lock(&sess->lock);
        i = find_first_zero_bit(sess->sess_mask, TEE_NUM_SESSIONS);
        if (i < TEE_NUM_SESSIONS)
                set_bit(i, sess->sess_mask);
        spin_unlock(&sess->lock);

        if (i >= TEE_NUM_SESSIONS) {
                pr_err("reached maximum session count %d\n", TEE_NUM_SESSIONS);
                handle_unload_ta(ta_handle);
                kref_put(&sess->refcount, destroy_session);
                rc = -ENOMEM;
                goto out;
        }

        /* Open session with loaded TA */
        handle_open_session(arg, &session_info, param);
        if (arg->ret != TEEC_SUCCESS) {
                pr_err("open_session failed %d\n", arg->ret);
                spin_lock(&sess->lock);
                clear_bit(i, sess->sess_mask);
                spin_unlock(&sess->lock);
                handle_unload_ta(ta_handle);
                kref_put(&sess->refcount, destroy_session);
                goto out;
        }

        sess->session_info[i] = session_info;
        set_session_id(ta_handle, i, &arg->session);
out:
        free_pages((u64)ta, get_order(ta_size));
        return rc;
}

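/*
 * Close a previously opened session: clear its usage bit, ask the TEE to
 * close the session, unload the TA reference and drop the session kref.
 */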
int amdtee_close_session(struct tee_context *ctx, u32 session)
{
        struct amdtee_context_data *ctxdata = ctx->data;
        u32 i, ta_handle, session_info;
        struct amdtee_session *sess;

        pr_debug("%s: sid = 0x%x\n", __func__, session);

        /*
         * Check that the session is valid and clear the session
         * usage bit
         */
        mutex_lock(&session_list_mutex);
        sess = find_session(ctxdata, session);
        if (sess) {
                ta_handle = get_ta_handle(session);
                i = get_session_index(session);
                session_info = sess->session_info[i];
                spin_lock(&sess->lock);
                clear_bit(i, sess->sess_mask);
                spin_unlock(&sess->lock);
        }
        mutex_unlock(&session_list_mutex);

        if (!sess)
                return -EINVAL;

        /* Close the session */
        handle_close_session(ta_handle, session_info);
        handle_unload_ta(ta_handle);

        kref_put(&sess->refcount, destroy_session);

        return 0;
}

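/*
 * Register a shared-memory buffer with the TEE. The buffer ID returned by
 * the TEE is recorded in the context's shm_list so it can later be looked
 * up by get_buffer_id() and released in amdtee_unmap_shmem().
 */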
int amdtee_map_shmem(struct tee_shm *shm)
{
        struct amdtee_context_data *ctxdata;
        struct amdtee_shm_data *shmnode;
        struct shmem_desc shmem;
        int rc, count;
        u32 buf_id;

        if (!shm)
                return -EINVAL;

        shmnode = kmalloc(sizeof(*shmnode), GFP_KERNEL);
        if (!shmnode)
                return -ENOMEM;

        count = 1;
        shmem.kaddr = shm->kaddr;
        shmem.size = shm->size;

        /*
         * Send a MAP command to TEE and get the corresponding
         * buffer Id
         */
        rc = handle_map_shmem(count, &shmem, &buf_id);
        if (rc) {
                pr_err("map_shmem failed: ret = %d\n", rc);
                kfree(shmnode);
                return rc;
        }

        shmnode->kaddr = shm->kaddr;
        shmnode->buf_id = buf_id;
        ctxdata = shm->ctx->data;
        mutex_lock(&ctxdata->shm_mutex);
        list_add(&shmnode->shm_node, &ctxdata->shm_list);
        mutex_unlock(&ctxdata->shm_mutex);

        pr_debug("buf_id :[%x] kaddr[%p]\n", shmnode->buf_id, shmnode->kaddr);

        return 0;
}

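/*
 * Unregister a shared-memory buffer: send an UNMAP command to the TEE and
 * remove the matching entry from the context's shm_list.
 */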
void amdtee_unmap_shmem(struct tee_shm *shm)
{
        struct amdtee_context_data *ctxdata;
        struct amdtee_shm_data *shmnode;
        u32 buf_id;

        if (!shm)
                return;

        buf_id = get_buffer_id(shm);
        /* Unmap the shared memory from TEE */
        handle_unmap_shmem(buf_id);

        ctxdata = shm->ctx->data;
        mutex_lock(&ctxdata->shm_mutex);
        list_for_each_entry(shmnode, &ctxdata->shm_list, shm_node)
                if (buf_id == shmnode->buf_id) {
                        list_del(&shmnode->shm_node);
                        kfree(shmnode);
                        break;
                }
        mutex_unlock(&ctxdata->shm_mutex);
}

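/*
 * Invoke a command in a previously opened session. The session ID from
 * user space is validated against the context's session list before the
 * command is forwarded to the TEE.
 */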
int amdtee_invoke_func(struct tee_context *ctx,
                       struct tee_ioctl_invoke_arg *arg,
                       struct tee_param *param)
{
        struct amdtee_context_data *ctxdata = ctx->data;
        struct amdtee_session *sess;
        u32 i, session_info;

        /* Check that the session is valid */
        mutex_lock(&session_list_mutex);
        sess = find_session(ctxdata, arg->session);
        if (sess) {
                i = get_session_index(arg->session);
                session_info = sess->session_info[i];
        }
        mutex_unlock(&session_list_mutex);

        if (!sess)
                return -EINVAL;

        handle_invoke_cmd(arg, session_info, param);

        return 0;
}

int amdtee_cancel_req(struct tee_context *ctx, u32 cancel_id, u32 session)
{
        return -EINVAL;
}

static const struct tee_driver_ops amdtee_ops = {
        .get_version = amdtee_get_version,
        .open = amdtee_open,
        .release = amdtee_release,
        .open_session = amdtee_open_session,
        .close_session = amdtee_close_session,
        .invoke_func = amdtee_invoke_func,
        .cancel_req = amdtee_cancel_req,
};

static const struct tee_desc amdtee_desc = {
        .name = DRIVER_NAME "-clnt",
        .ops = &amdtee_ops,
        .owner = THIS_MODULE,
};

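/*
 * Module init: check that the TEE is present via the PSP interface,
 * configure the shared-memory pool and register the TEE device with the
 * TEE subsystem.
 */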
static int __init amdtee_driver_init(void)
{
        struct tee_device *teedev;
        struct tee_shm_pool *pool;
        struct amdtee *amdtee;
        int rc;

        rc = psp_check_tee_status();
        if (rc) {
                pr_err("amd-tee driver: tee not present\n");
                return rc;
        }

        drv_data = kzalloc(sizeof(*drv_data), GFP_KERNEL);
        if (!drv_data)
                return -ENOMEM;

        amdtee = kzalloc(sizeof(*amdtee), GFP_KERNEL);
        if (!amdtee) {
                rc = -ENOMEM;
                goto err_kfree_drv_data;
        }

        pool = amdtee_config_shm();
        if (IS_ERR(pool)) {
                pr_err("shared pool configuration error\n");
                rc = PTR_ERR(pool);
                goto err_kfree_amdtee;
        }

        teedev = tee_device_alloc(&amdtee_desc, NULL, pool, amdtee);
        if (IS_ERR(teedev)) {
                rc = PTR_ERR(teedev);
                goto err_free_pool;
        }
        amdtee->teedev = teedev;

        rc = tee_device_register(amdtee->teedev);
        if (rc)
                goto err_device_unregister;

        amdtee->pool = pool;

        drv_data->amdtee = amdtee;

        pr_info("amd-tee driver initialization successful\n");
        return 0;

err_device_unregister:
        tee_device_unregister(amdtee->teedev);

err_free_pool:
        tee_shm_pool_free(pool);

err_kfree_amdtee:
        kfree(amdtee);

err_kfree_drv_data:
        kfree(drv_data);
        drv_data = NULL;

        pr_err("amd-tee driver initialization failed\n");
        return rc;
}
module_init(amdtee_driver_init);

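/* Module exit: unregister the TEE device and free the shared-memory pool. */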
static void __exit amdtee_driver_exit(void)
{
        struct amdtee *amdtee;

        if (!drv_data || !drv_data->amdtee)
                return;

        amdtee = drv_data->amdtee;

        tee_device_unregister(amdtee->teedev);
        tee_shm_pool_free(amdtee->pool);
}
module_exit(amdtee_driver_exit);

MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_DESCRIPTION("AMD-TEE driver");
MODULE_VERSION("1.0");
MODULE_LICENSE("Dual MIT/GPL");