xref: /OK3568_Linux_fs/kernel/drivers/tee/optee/shm_pool.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, Linaro Limited
 * Copyright (c) 2017, EPAM Systems
 */
#include <linux/device.h>
#include <linux/dma-buf.h>
#include <linux/genalloc.h>
#include <linux/slab.h>
#include <linux/tee_drv.h>
#include "optee_private.h"
#include "optee_smc.h"
#include "shm_pool.h"

static int pool_op_alloc(struct tee_shm_pool_mgr *poolm,
			 struct tee_shm *shm, size_t size)
{
	unsigned int order = get_order(size);
	struct page *page;
	int rc = 0;

	page = alloc_pages(GFP_KERNEL | __GFP_ZERO, order);
	if (!page)
		return -ENOMEM;

	shm->kaddr = page_address(page);
	shm->paddr = page_to_phys(page);
	shm->size = PAGE_SIZE << order;

	/*
	 * Shared memory private to the OP-TEE driver doesn't need
	 * to be registered with OP-TEE.
	 */
	if (!(shm->flags & TEE_SHM_PRIV)) {
		unsigned int nr_pages = 1 << order, i;
		struct page **pages;

		pages = kcalloc(nr_pages, sizeof(*pages), GFP_KERNEL);
		if (!pages) {
			rc = -ENOMEM;
			goto err;
		}

		for (i = 0; i < nr_pages; i++)
			pages[i] = page + i;

		shm->flags |= TEE_SHM_REGISTER;
		rc = optee_shm_register(shm->ctx, shm, pages, nr_pages,
					(unsigned long)shm->kaddr);
		kfree(pages);
		if (rc)
			goto err;
	}

	return 0;

err:
	__free_pages(page, order);
	return rc;
}
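
/*
 * Illustrative sketch, not part of the original file: roughly how the
 * generic TEE core is expected to drive the ops above.  In this kernel
 * generation tee_shm_alloc() picks a pool manager from the device's
 * tee_shm_pool and calls its alloc/free hooks; the helper name and the
 * exact call sequence below are assumptions made for illustration only.
 */
static int __maybe_unused example_pool_round_trip(struct tee_shm_pool_mgr *poolm,
						  struct tee_shm *shm)
{
	int rc;

	/*
	 * Back the tee_shm with pages; unless TEE_SHM_PRIV is set the
	 * buffer is also registered with OP-TEE (see pool_op_alloc()).
	 */
	rc = poolm->ops->alloc(poolm, shm, 4096);
	if (rc)
		return rc;

	/* ... use shm->kaddr / shm->paddr ... */

	/* Unregister (if registered) and release the backing pages. */
	poolm->ops->free(poolm, shm);
	return 0;
}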

static void pool_op_free(struct tee_shm_pool_mgr *poolm,
			 struct tee_shm *shm)
{
	if (!(shm->flags & TEE_SHM_PRIV))
		optee_shm_unregister(shm->ctx, shm);

	free_pages((unsigned long)shm->kaddr, get_order(shm->size));
	shm->kaddr = NULL;
}

static void pool_op_destroy_poolmgr(struct tee_shm_pool_mgr *poolm)
{
	kfree(poolm);
}

static const struct tee_shm_pool_mgr_ops pool_ops = {
	.alloc = pool_op_alloc,
	.free = pool_op_free,
	.destroy_poolmgr = pool_op_destroy_poolmgr,
};

/**
 * optee_shm_pool_alloc_pages() - create page-based allocator pool
 *
 * This pool is used when OP-TEE supports dynamic SHM. In this case
 * command buffers and such are allocated from kernel's own memory.
 */
struct tee_shm_pool_mgr *optee_shm_pool_alloc_pages(void)
{
	struct tee_shm_pool_mgr *mgr = kzalloc(sizeof(*mgr), GFP_KERNEL);

	if (!mgr)
		return ERR_PTR(-ENOMEM);

	mgr->ops = &pool_ops;

	return mgr;
}
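
/*
 * Illustrative sketch, not part of the original file: how the OP-TEE core
 * pairs two of these page-based managers, one for driver-private buffers
 * and one for dma-buf backed client buffers, into a tee_shm_pool.  This
 * mirrors what optee_config_dyn_shm() does in core.c when dynamic SHM is
 * available; the function name below is hypothetical and the exact
 * sequence is an assumption for illustration only.
 */
static struct tee_shm_pool * __maybe_unused example_dyn_shm_pool(void)
{
	struct tee_shm_pool_mgr *priv_mgr;
	struct tee_shm_pool_mgr *dmabuf_mgr;
	struct tee_shm_pool *pool;

	priv_mgr = optee_shm_pool_alloc_pages();
	if (IS_ERR(priv_mgr))
		return ERR_CAST(priv_mgr);

	dmabuf_mgr = optee_shm_pool_alloc_pages();
	if (IS_ERR(dmabuf_mgr)) {
		tee_shm_pool_mgr_destroy(priv_mgr);
		return ERR_CAST(dmabuf_mgr);
	}

	pool = tee_shm_pool_alloc(priv_mgr, dmabuf_mgr);
	if (IS_ERR(pool)) {
		tee_shm_pool_mgr_destroy(priv_mgr);
		tee_shm_pool_mgr_destroy(dmabuf_mgr);
	}

	return pool;
}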