/*
 * drm_sync_helper.c: software fence and helper functions for fences and
 * reservations used for dma buffer access synchronization between drivers.
 *
 * Copyright 2014 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/module.h>
#include <linux/reservation.h>
#include <linux/slab.h>

#include <drm/drm_sync_helper.h>

static DEFINE_SPINLOCK(sw_fence_lock);

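/**
 * drm_add_reservation - add a reservation object to an array, deduplicated
 * @resv: reservation object to add
 * @resvs: array of reservation objects collected so far
 * @excl_resvs_bitmap: bitmap marking which entries need exclusive access
 * @num_resvs: number of valid entries in @resvs, updated on append
 * @exclusive: whether @resv is being added for exclusive access
 *
 * Appends @resv to @resvs unless it is already present. No bounds checking
 * is done; the caller must size @resvs and @excl_resvs_bitmap for the
 * maximum number of reservations it will collect.
 */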
void drm_add_reservation(struct reservation_object *resv,
			 struct reservation_object **resvs,
			 unsigned long *excl_resvs_bitmap,
			 unsigned int *num_resvs, bool exclusive)
{
	unsigned int r;

	for (r = 0; r < *num_resvs; r++) {
		if (resvs[r] == resv)
			return;
	}
	resvs[*num_resvs] = resv;
	if (exclusive)
		set_bit(*num_resvs, excl_resvs_bitmap);
	(*num_resvs)++;
}
EXPORT_SYMBOL(drm_add_reservation);

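/**
 * drm_lock_reservations - lock an array of reservation objects
 * @resvs: array of reservation objects to lock
 * @num_resvs: number of entries in @resvs
 * @ctx: ww_acquire context to use for deadlock avoidance
 *
 * Takes the ww_mutex of every reservation in @resvs using the wait/wound
 * protocol: on -EDEADLK every lock taken so far is dropped, the contended
 * lock is taken with ww_mutex_lock_slow() and the sequence is retried.
 * Returns 0 with all locks held, or a negative error code with no locks
 * held and @ctx finalized.
 *
 * A minimal usage sketch (NUM_BUFS and the resvs array are hypothetical,
 * e.g. collected with drm_add_reservation()):
 *
 *	struct ww_acquire_ctx ctx;
 *	int ret;
 *
 *	ret = drm_lock_reservations(resvs, NUM_BUFS, &ctx);
 *	if (ret < 0)
 *		return ret;
 *	... add or wait on fences ...
 *	drm_unlock_reservations(resvs, NUM_BUFS, &ctx);
 */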
int drm_lock_reservations(struct reservation_object **resvs,
			  unsigned int num_resvs, struct ww_acquire_ctx *ctx)
{
	unsigned int r;
	struct reservation_object *slow_res = NULL;

	ww_acquire_init(ctx, &reservation_ww_class);

retry:
	for (r = 0; r < num_resvs; r++) {
		int ret;
		/* skip the resv we already locked with the slow lock */
		if (resvs[r] == slow_res) {
			slow_res = NULL;
			continue;
		}
		ret = ww_mutex_lock(&resvs[r]->lock, ctx);
		if (ret < 0) {
			unsigned int slow_r = r;
			/*
			 * undo all the locks we have already taken,
			 * in reverse order
			 */
			while (r > 0) {
				r--;
				ww_mutex_unlock(&resvs[r]->lock);
			}
			if (slow_res)
				ww_mutex_unlock(&slow_res->lock);
			if (ret == -EDEADLK) {
				slow_res = resvs[slow_r];
				ww_mutex_lock_slow(&slow_res->lock, ctx);
				goto retry;
			}
			ww_acquire_fini(ctx);
			return ret;
		}
	}

	ww_acquire_done(ctx);
	return 0;
}
EXPORT_SYMBOL(drm_lock_reservations);

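/**
 * drm_unlock_reservations - unlock reservations taken with drm_lock_reservations()
 * @resvs: array of locked reservation objects
 * @num_resvs: number of entries in @resvs
 * @ctx: the ww_acquire context used to lock them
 */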
void drm_unlock_reservations(struct reservation_object **resvs,
			     unsigned int num_resvs,
			     struct ww_acquire_ctx *ctx)
{
	unsigned int r;

	for (r = 0; r < num_resvs; r++)
		ww_mutex_unlock(&resvs[r]->lock);

	ww_acquire_fini(ctx);
}
EXPORT_SYMBOL(drm_unlock_reservations);

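/*
 * Completion callback installed on each tracked fence. rcb->count starts
 * at 1 (drm_reservation_cb_init()), is incremented once per tracked fence
 * and decremented here when a fence signals; drm_reservation_cb_done()
 * drops the initial reference, so the worker is scheduled exactly once,
 * after "done" was called and the last fence has signaled.
 */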
static void reservation_cb_fence_cb(struct fence *fence, struct fence_cb *cb)
{
	struct drm_reservation_fence_cb *rfcb =
		container_of(cb, struct drm_reservation_fence_cb, base);
	struct drm_reservation_cb *rcb = rfcb->parent;

	if (atomic_dec_and_test(&rcb->count))
		schedule_work(&rcb->work);
}

static void
reservation_cb_cleanup(struct drm_reservation_cb *rcb)
{
	unsigned int cb;

	for (cb = 0; cb < rcb->num_fence_cbs; cb++) {
		if (rcb->fence_cbs[cb]) {
			fence_remove_callback(rcb->fence_cbs[cb]->fence,
					      &rcb->fence_cbs[cb]->base);
			fence_put(rcb->fence_cbs[cb]->fence);
			kfree(rcb->fence_cbs[cb]);
			rcb->fence_cbs[cb] = NULL;
		}
	}
	kfree(rcb->fence_cbs);
	rcb->fence_cbs = NULL;
	rcb->num_fence_cbs = 0;
}

static void reservation_cb_work(struct work_struct *pwork)
{
	struct drm_reservation_cb *rcb =
		container_of(pwork, struct drm_reservation_cb, work);
	/*
	 * clean up everything before invoking the callback, because the
	 * callback may free the structure containing rcb and its work_struct
	 */
	reservation_cb_cleanup(rcb);
	rcb->func(rcb, rcb->context);
}

static int
reservation_cb_add_fence_cb(struct drm_reservation_cb *rcb, struct fence *fence)
{
	int ret;
	struct drm_reservation_fence_cb *fence_cb;
	struct drm_reservation_fence_cb **new_fence_cbs;

	new_fence_cbs = krealloc(rcb->fence_cbs,
				 (rcb->num_fence_cbs + 1)
				 * sizeof(struct drm_reservation_fence_cb *),
				 GFP_KERNEL);
	if (!new_fence_cbs)
		return -ENOMEM;
	rcb->fence_cbs = new_fence_cbs;

	fence_cb = kzalloc(sizeof(struct drm_reservation_fence_cb), GFP_KERNEL);
	if (!fence_cb)
		return -ENOMEM;

	/*
	 * we do not want the fence to disappear on us while we are waiting
	 * for the callback, and we need it in case we want to remove
	 * callbacks later
	 */
	fence_get(fence);
	fence_cb->fence = fence;
	fence_cb->parent = rcb;
	rcb->fence_cbs[rcb->num_fence_cbs] = fence_cb;
	atomic_inc(&rcb->count);
	ret = fence_add_callback(fence, &fence_cb->base,
				 reservation_cb_fence_cb);
	if (ret < 0) {
		atomic_dec(&rcb->count);
		fence_put(fence_cb->fence);
		kfree(fence_cb);
		/* a fence that has already signaled is not an error */
		return ret == -ENOENT ? 0 : ret;
	}
	rcb->num_fence_cbs++;
	return 0;
}

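/**
 * drm_reservation_cb_init - initialize a reservation callback tracker
 * @rcb: tracker to initialize, typically embedded in a caller-owned struct
 * @func: function called (from a worker) once all tracked fences signaled
 * @context: opaque pointer passed back to @func
 *
 * The count starts at 1 so that @func cannot fire before
 * drm_reservation_cb_done() is called, even if all fences added in the
 * meantime have already signaled.
 */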
void
drm_reservation_cb_init(struct drm_reservation_cb *rcb,
			drm_reservation_cb_func_t func, void *context)
{
	INIT_WORK(&rcb->work, reservation_cb_work);
	atomic_set(&rcb->count, 1);
	rcb->num_fence_cbs = 0;
	rcb->fence_cbs = NULL;
	rcb->func = func;
	rcb->context = context;
}
EXPORT_SYMBOL(drm_reservation_cb_init);

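/**
 * drm_reservation_cb_add - track the fences of a reservation object
 * @rcb: tracker initialized with drm_reservation_cb_init()
 * @resv: reservation object whose fences to wait for
 * @exclusive: true to also wait for the shared fences, i.e. when the
 *	caller wants exclusive access to the buffer
 *
 * Snapshots the fences of @resv via reservation_object_get_fences_rcu()
 * and installs a callback on each one that has not already signaled.
 * Returns 0 on success or a negative error code; on error all callbacks
 * installed so far are removed again.
 */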
int
drm_reservation_cb_add(struct drm_reservation_cb *rcb,
		       struct reservation_object *resv, bool exclusive)
{
	int ret = 0;
	struct fence *fence;
	unsigned int shared_count = 0, f;
	struct fence **shared_fences = NULL;

	/* enumerate all the fences in the reservation and add callbacks */
	ret = reservation_object_get_fences_rcu(resv, &fence,
						&shared_count, &shared_fences);
	if (ret < 0)
		return ret;

	if (fence) {
		ret = reservation_cb_add_fence_cb(rcb, fence);
		if (ret < 0) {
			reservation_cb_cleanup(rcb);
			goto error;
		}
	}

	if (exclusive) {
		for (f = 0; f < shared_count; f++) {
			ret = reservation_cb_add_fence_cb(rcb,
							  shared_fences[f]);
			if (ret < 0) {
				reservation_cb_cleanup(rcb);
				goto error;
			}
		}
	}

error:
	if (fence)
		fence_put(fence);
	if (shared_fences) {
		for (f = 0; f < shared_count; f++)
			fence_put(shared_fences[f]);
		kfree(shared_fences);
	}
	return ret;
}
EXPORT_SYMBOL(drm_reservation_cb_add);

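/**
 * drm_reservation_cb_done - arm the tracker once all fences were added
 * @rcb: tracker to arm
 *
 * Drops the initial count reference; from this point on the callback
 * fires (from a worker) as soon as the remaining tracked fences signal,
 * possibly immediately.
 *
 * A minimal end-to-end sketch (my_done() and my_data are hypothetical):
 *
 *	drm_reservation_cb_init(&rcb, my_done, my_data);
 *	ret = drm_reservation_cb_add(&rcb, resv, false);
 *	if (ret < 0)
 *		return ret;
 *	drm_reservation_cb_done(&rcb);
 */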
void
drm_reservation_cb_done(struct drm_reservation_cb *rcb)
{
	/*
	 * we need to drop the initial count of 1 and trigger the callback
	 * in case all the fences have already signaled
	 */
	if (atomic_dec_and_test(&rcb->count)) {
		/*
		 * we could call the callback here directly, but if the
		 * callback function needed to take the same mutex as our
		 * caller it could deadlock, so it is safer to call it
		 * from a worker
		 */
		schedule_work(&rcb->work);
	}
}
EXPORT_SYMBOL(drm_reservation_cb_done);

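/**
 * drm_reservation_cb_fini - tear down a tracker whose callback may not
 *	have run yet
 * @rcb: tracker to tear down
 *
 * Prevents the worker from firing, waits for one that is already running
 * and removes all installed fence callbacks. Do not call this from @rcb's
 * own callback: cancel_work_sync() would wait on the very worker it is
 * called from.
 */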
void
drm_reservation_cb_fini(struct drm_reservation_cb *rcb)
{
	/* make sure no work will be triggered */
	atomic_set(&rcb->count, 0);
	cancel_work_sync(&rcb->work);
	reservation_cb_cleanup(rcb);
}
EXPORT_SYMBOL(drm_reservation_cb_fini);

static bool sw_fence_enable_signaling(struct fence *f)
{
	return true;
}

static const char *sw_fence_get_driver_name(struct fence *fence)
{
	return "drm_sync_helper";
}

static const char *sw_fence_get_timeline_name(struct fence *f)
{
	return "drm_sync.sw";
}

static const struct fence_ops sw_fence_ops = {
	.get_driver_name = sw_fence_get_driver_name,
	.get_timeline_name = sw_fence_get_timeline_name,
	.enable_signaling = sw_fence_enable_signaling,
	.signaled = NULL,
	.wait = fence_default_wait,
	.release = NULL
};

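/**
 * drm_sw_fence_new - create a software-only fence
 * @context: fence context, e.g. allocated with fence_context_alloc()
 * @seqno: sequence number of this fence within @context
 *
 * The returned fence has no hardware backing; whoever creates it is
 * responsible for signaling it with fence_signal(). Returns an ERR_PTR()
 * on allocation failure.
 *
 * A minimal usage sketch (my_context is hypothetical):
 *
 *	struct fence *f = drm_sw_fence_new(my_context, 1);
 *
 *	if (IS_ERR(f))
 *		return PTR_ERR(f);
 *	... publish f to waiters, do the work ...
 *	fence_signal(f);
 *	fence_put(f);
 */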
struct fence *drm_sw_fence_new(unsigned int context, unsigned seqno)
{
	struct fence *fence;

	fence = kzalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence)
		return ERR_PTR(-ENOMEM);
	fence_init(fence, &sw_fence_ops, &sw_fence_lock, context, seqno);

	return fence;
}
EXPORT_SYMBOL(drm_sw_fence_new);