xref: /OK3568_Linux_fs/kernel/drivers/dma-buf/dma-buf-cache.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2021 Rockchip Electronics Co. Ltd.
 */

#include <linux/slab.h>
#include <linux/dma-buf.h>
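/*
 * When CONFIG_DMABUF_CACHE is enabled, <linux/dma-buf-cache.h> remaps the
 * core dma_buf_* attach/map calls onto the dma_buf_cache_* wrappers.
 * Undefine it here so this file sees the plain declarations and can call
 * the core API directly.
 */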
#undef CONFIG_DMABUF_CACHE
#include <linux/dma-buf-cache.h>

/* NOTE: the dma-buf-cache APIs are not IRQ safe; never call them from IRQ context! */

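/*
 * Per-buffer cache state, installed as the dma-buf's destructor data:
 * the head of a list of cached attachments, torn down only when the
 * buffer itself is released.
 */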
struct dma_buf_cache_list {
	struct list_head head;
};

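/*
 * One cached entry: a device attachment plus the most recent mapping
 * made through it, kept alive for reuse until the dma-buf is released.
 */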
struct dma_buf_cache {
	struct list_head list;
	struct dma_buf_attachment *attach;
	enum dma_data_direction direction;
	struct sg_table *sg_table;
};

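/*
 * Runs when the dma-buf is released: unmaps and detaches every cached
 * attachment, then frees the cache list itself.
 */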
static int dma_buf_cache_destructor(struct dma_buf *dmabuf, void *dtor_data)
{
	struct dma_buf_cache_list *data;
	struct dma_buf_cache *cache, *tmp;

	mutex_lock(&dmabuf->cache_lock);

	data = dmabuf->dtor_data;

	list_for_each_entry_safe(cache, tmp, &data->head, list) {
		if (!IS_ERR_OR_NULL(cache->sg_table))
			dma_buf_unmap_attachment(cache->attach,
						 cache->sg_table,
						 cache->direction);

		dma_buf_detach(dmabuf, cache->attach);
		list_del(&cache->list);
		kfree(cache);
	}

	mutex_unlock(&dmabuf->cache_lock);

	kfree(data);
	return 0;
}

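/*
 * Look up the cache entry for @attach. Returns NULL if the buffer is not
 * managed by this cache or the attachment was not made through it.
 * The caller must hold dmabuf->cache_lock.
 */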
static struct dma_buf_cache *
dma_buf_cache_get_cache(struct dma_buf_attachment *attach)
{
	struct dma_buf_cache_list *data;
	struct dma_buf_cache *cache;
	struct dma_buf *dmabuf = attach->dmabuf;

	if (dmabuf->dtor != dma_buf_cache_destructor)
		return NULL;

	data = dmabuf->dtor_data;

	list_for_each_entry(cache, &data->head, list) {
		if (cache->attach == attach)
			return cache;
	}

	return NULL;
}

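/*
 * Counterpart of dma_buf_cache_attach(): cached attachments are kept for
 * reuse and only detached by the destructor; anything else is passed
 * straight to dma_buf_detach().
 */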
void dma_buf_cache_detach(struct dma_buf *dmabuf,
			  struct dma_buf_attachment *attach)
{
	struct dma_buf_cache *cache;

	mutex_lock(&dmabuf->cache_lock);

	cache = dma_buf_cache_get_cache(attach);
	if (!cache)
		dma_buf_detach(dmabuf, attach);

	mutex_unlock(&dmabuf->cache_lock);
}
EXPORT_SYMBOL(dma_buf_cache_detach);

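/*
 * Attach @dev to @dmabuf, reusing a previously cached attachment for the
 * same device when one exists. On first use the cache list is allocated
 * and installed via the buffer's destructor hook; buffers that already
 * carry a foreign destructor fall back to plain dma_buf_attach().
 */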
struct dma_buf_attachment *dma_buf_cache_attach(struct dma_buf *dmabuf,
						struct device *dev)
{
	struct dma_buf_attachment *attach;
	struct dma_buf_cache_list *data;
	struct dma_buf_cache *cache;
	bool new_dtor = false;

	mutex_lock(&dmabuf->cache_lock);

	if (!dmabuf->dtor) {
		data = kzalloc(sizeof(*data), GFP_KERNEL);
		if (!data) {
			attach = ERR_PTR(-ENOMEM);
			goto err_data;
		}
		INIT_LIST_HEAD(&data->head);
		dma_buf_set_destructor(dmabuf, dma_buf_cache_destructor, data);
		new_dtor = true;
	}

	if (dmabuf->dtor && dmabuf->dtor != dma_buf_cache_destructor) {
		/* Buffer has a foreign destructor: don't cache this attachment */
		attach = dma_buf_attach(dmabuf, dev);
		goto attach_done;
	}

	data = dmabuf->dtor_data;

	list_for_each_entry(cache, &data->head, list) {
		if (cache->attach->dev == dev) {
			/* Already attached */
			attach = cache->attach;
			goto attach_done;
		}
	}

	cache = kzalloc(sizeof(*cache), GFP_KERNEL);
	if (!cache) {
		attach = ERR_PTR(-ENOMEM);
		goto err_cache;
	}
	/* Cache attachment */
	attach = dma_buf_attach(dmabuf, dev);
	if (IS_ERR_OR_NULL(attach))
		goto err_attach;

	cache->attach = attach;
	list_add(&cache->list, &data->head);

attach_done:
	mutex_unlock(&dmabuf->cache_lock);
	return attach;

err_attach:
	kfree(cache);
err_cache:
	/*
	 * Only tear the destructor down if this call installed it; a
	 * pre-existing cache list still owns live attachments.
	 */
	if (new_dtor) {
		dma_buf_set_destructor(dmabuf, NULL, NULL);
		kfree(data);
	}
err_data:
	mutex_unlock(&dmabuf->cache_lock);
	return attach;
}
EXPORT_SYMBOL(dma_buf_cache_attach);

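/*
 * Counterpart of dma_buf_cache_map_attachment(): cached mappings stay in
 * place for reuse and are only unmapped by the destructor; uncached
 * attachments are passed straight to dma_buf_unmap_attachment().
 */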
void dma_buf_cache_unmap_attachment(struct dma_buf_attachment *attach,
				    struct sg_table *sg_table,
				    enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attach->dmabuf;
	struct dma_buf_cache *cache;

	mutex_lock(&dmabuf->cache_lock);

	cache = dma_buf_cache_get_cache(attach);
	if (!cache)
		dma_buf_unmap_attachment(attach, sg_table, direction);

	mutex_unlock(&dmabuf->cache_lock);
}
EXPORT_SYMBOL(dma_buf_cache_unmap_attachment);

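/*
 * Map @attach, returning the cached sg_table when one already exists for
 * the same direction; a direction change unmaps the old mapping first.
 * Uncached attachments are passed straight through.
 */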
struct sg_table *dma_buf_cache_map_attachment(struct dma_buf_attachment *attach,
					      enum dma_data_direction direction)
{
	struct dma_buf *dmabuf = attach->dmabuf;
	struct dma_buf_cache *cache;
	struct sg_table *sg_table;

	mutex_lock(&dmabuf->cache_lock);

	cache = dma_buf_cache_get_cache(attach);
	if (!cache) {
		sg_table = dma_buf_map_attachment(attach, direction);
		goto map_done;
	}
	if (!IS_ERR_OR_NULL(cache->sg_table)) {
		/* Already mapped */
		if (cache->direction == direction) {
			sg_table = cache->sg_table;
			goto map_done;
		}
		/* Different direction: drop the old mapping and remap */
		dma_buf_unmap_attachment(attach, cache->sg_table,
					 cache->direction);
	}

	/* Cache the mapping; a failed map is simply retried on the next call */
	sg_table = dma_buf_map_attachment(attach, direction);
	cache->sg_table = sg_table;
	cache->direction = direction;

map_done:
	mutex_unlock(&dmabuf->cache_lock);
	return sg_table;
}
EXPORT_SYMBOL(dma_buf_cache_map_attachment);
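
/*
 * Example usage (a minimal sketch, not part of the original file): a
 * driver importing a dma-buf through the cache layer. "my_dev" and "fd"
 * are assumed to come from the caller; error handling is elided.
 *
 *	struct dma_buf *buf = dma_buf_get(fd);
 *	struct dma_buf_attachment *attach;
 *	struct sg_table *sgt;
 *
 *	attach = dma_buf_cache_attach(buf, my_dev);
 *	sgt = dma_buf_cache_map_attachment(attach, DMA_BIDIRECTIONAL);
 *
 *	// ... program the device from sgt ...
 *
 *	dma_buf_cache_unmap_attachment(attach, sgt, DMA_BIDIRECTIONAL);
 *	dma_buf_cache_detach(buf, attach);
 *	dma_buf_put(buf);
 *
 * The unmap/detach calls above are no-ops for cached entries; the real
 * teardown happens in dma_buf_cache_destructor() once the buffer's last
 * reference is dropped.
 */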