1*4882a593Smuzhiyun /*
2*4882a593Smuzhiyun * Copyright © 2017 Intel Corporation
3*4882a593Smuzhiyun *
4*4882a593Smuzhiyun * Permission is hereby granted, free of charge, to any person obtaining a
5*4882a593Smuzhiyun * copy of this software and associated documentation files (the "Software"),
6*4882a593Smuzhiyun * to deal in the Software without restriction, including without limitation
7*4882a593Smuzhiyun * the rights to use, copy, modify, merge, publish, distribute, sublicense,
8*4882a593Smuzhiyun * and/or sell copies of the Software, and to permit persons to whom the
9*4882a593Smuzhiyun * Software is furnished to do so, subject to the following conditions:
10*4882a593Smuzhiyun *
11*4882a593Smuzhiyun * The above copyright notice and this permission notice (including the next
12*4882a593Smuzhiyun * paragraph) shall be included in all copies or substantial portions of the
13*4882a593Smuzhiyun * Software.
14*4882a593Smuzhiyun *
15*4882a593Smuzhiyun * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
16*4882a593Smuzhiyun * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
17*4882a593Smuzhiyun * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
18*4882a593Smuzhiyun * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
19*4882a593Smuzhiyun * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
20*4882a593Smuzhiyun * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
21*4882a593Smuzhiyun * IN THE SOFTWARE.
22*4882a593Smuzhiyun *
23*4882a593Smuzhiyun */
24*4882a593Smuzhiyun
25*4882a593Smuzhiyun #include <linux/slab.h>
26*4882a593Smuzhiyun
27*4882a593Smuzhiyun #include "i915_syncmap.h"
28*4882a593Smuzhiyun
29*4882a593Smuzhiyun #include "i915_gem.h" /* GEM_BUG_ON() */
30*4882a593Smuzhiyun #include "i915_selftest.h"
31*4882a593Smuzhiyun
/* Each layer of the tree indexes SHIFT bits of the id; MASK extracts them. */
#define SHIFT ilog2(KSYNCMAP)
#define MASK (KSYNCMAP - 1)
34*4882a593Smuzhiyun
35*4882a593Smuzhiyun /*
36*4882a593Smuzhiyun * struct i915_syncmap is a layer of a radixtree that maps a u64 fence
37*4882a593Smuzhiyun * context id to the last u32 fence seqno waited upon from that context.
38*4882a593Smuzhiyun * Unlike lib/radixtree it uses a parent pointer that allows traversal back to
39*4882a593Smuzhiyun * the root. This allows us to access the whole tree via a single pointer
40*4882a593Smuzhiyun * to the most recently used layer. We expect fence contexts to be dense
41*4882a593Smuzhiyun * and most reuse to be on the same i915_gem_context but on neighbouring
42*4882a593Smuzhiyun * engines (i.e. on adjacent contexts) and reuse the same leaf, a very
43*4882a593Smuzhiyun * effective lookup cache. If the new lookup is not on the same leaf, we
44*4882a593Smuzhiyun * expect it to be on the neighbouring branch.
45*4882a593Smuzhiyun *
46*4882a593Smuzhiyun * A leaf holds an array of u32 seqno, and has height 0. The bitmap field
47*4882a593Smuzhiyun * allows us to store whether a particular seqno is valid (i.e. allows us
48*4882a593Smuzhiyun * to distinguish unset from 0).
49*4882a593Smuzhiyun *
50*4882a593Smuzhiyun * A branch holds an array of layer pointers, and has height > 0, and always
51*4882a593Smuzhiyun * has at least 2 layers (either branches or leaves) below it.
52*4882a593Smuzhiyun *
53*4882a593Smuzhiyun * For example,
54*4882a593Smuzhiyun * for x in
55*4882a593Smuzhiyun * 0 1 2 0x10 0x11 0x200 0x201
56*4882a593Smuzhiyun * 0x500000 0x500001 0x503000 0x503001
57*4882a593Smuzhiyun * 0xE<<60:
58*4882a593Smuzhiyun * i915_syncmap_set(&sync, x, lower_32_bits(x));
59*4882a593Smuzhiyun * will build a tree like:
60*4882a593Smuzhiyun * 0xXXXXXXXXXXXXXXXX
61*4882a593Smuzhiyun * 0-> 0x0000000000XXXXXX
62*4882a593Smuzhiyun * | 0-> 0x0000000000000XXX
63*4882a593Smuzhiyun * | | 0-> 0x00000000000000XX
64*4882a593Smuzhiyun * | | | 0-> 0x000000000000000X 0:0, 1:1, 2:2
65*4882a593Smuzhiyun * | | | 1-> 0x000000000000001X 0:10, 1:11
66*4882a593Smuzhiyun * | | 2-> 0x000000000000020X 0:200, 1:201
67*4882a593Smuzhiyun * | 5-> 0x000000000050XXXX
68*4882a593Smuzhiyun * | 0-> 0x000000000050000X 0:500000, 1:500001
69*4882a593Smuzhiyun * | 3-> 0x000000000050300X 0:503000, 1:503001
70*4882a593Smuzhiyun * e-> 0xe00000000000000X e:e
71*4882a593Smuzhiyun */
72*4882a593Smuzhiyun
struct i915_syncmap {
	u64 prefix;			/* upper bits of the ids covered by this layer */
	unsigned int height;		/* 0 for a leaf; a multiple of SHIFT for branches */
	unsigned int bitmap;		/* which slots of the trailing array are valid */
	struct i915_syncmap *parent;	/* NULL at the topmost layer */
	/*
	 * Following this header is an array of either seqno or child pointers:
	 * union {
	 *	u32 seqno[KSYNCMAP];
	 *	struct i915_syncmap *child[KSYNCMAP];
	 * };
	 */
};
86*4882a593Smuzhiyun
/**
 * i915_syncmap_init -- initialise the #i915_syncmap
 * @root: pointer to the #i915_syncmap
 *
 * Resets @root to an empty tree; layers are allocated lazily on first set.
 */
void i915_syncmap_init(struct i915_syncmap **root)
{
	/* Compile-time checks of the layout assumptions baked into SHIFT/MASK */
	BUILD_BUG_ON_NOT_POWER_OF_2(KSYNCMAP);
	BUILD_BUG_ON_NOT_POWER_OF_2(SHIFT);
	/* One bitmap bit must exist per slot in the trailing array */
	BUILD_BUG_ON(KSYNCMAP > BITS_PER_TYPE((*root)->bitmap));
	*root = NULL;
}
98*4882a593Smuzhiyun
/* The storage trailing a leaf (height == 0) is an array of KSYNCMAP seqno. */
static inline u32 *__sync_seqno(struct i915_syncmap *p)
{
	GEM_BUG_ON(p->height);
	return (u32 *)(p + 1);
}
104*4882a593Smuzhiyun
/* The storage trailing a branch (height > 0) is an array of child pointers. */
static inline struct i915_syncmap **__sync_child(struct i915_syncmap *p)
{
	GEM_BUG_ON(!p->height);
	return (struct i915_syncmap **)(p + 1);
}
110*4882a593Smuzhiyun
/* Slot within branch @p selected by @id: the SHIFT bits of @id at p->height. */
static inline unsigned int
__sync_branch_idx(const struct i915_syncmap *p, u64 id)
{
	return (id >> p->height) & MASK;
}
116*4882a593Smuzhiyun
/* Slot within leaf @p selected by @id: the lowest SHIFT bits of @id. */
static inline unsigned int
__sync_leaf_idx(const struct i915_syncmap *p, u64 id)
{
	GEM_BUG_ON(p->height);
	return id & MASK;
}
123*4882a593Smuzhiyun
/*
 * Bits of @id above the span covered by branch @p (its own slot included).
 * Kept as two shifts: height + SHIFT may reach the width of the type, and a
 * single shift by >= 64 would be undefined behaviour.
 */
static inline u64 __sync_branch_prefix(const struct i915_syncmap *p, u64 id)
{
	return id >> p->height >> SHIFT;
}
128*4882a593Smuzhiyun
/* Bits of @id above the KSYNCMAP seqno covered by leaf @p. */
static inline u64 __sync_leaf_prefix(const struct i915_syncmap *p, u64 id)
{
	GEM_BUG_ON(p->height);
	return id >> SHIFT;
}
134*4882a593Smuzhiyun
seqno_later(u32 a,u32 b)135*4882a593Smuzhiyun static inline bool seqno_later(u32 a, u32 b)
136*4882a593Smuzhiyun {
137*4882a593Smuzhiyun return (s32)(a - b) >= 0;
138*4882a593Smuzhiyun }
139*4882a593Smuzhiyun
140*4882a593Smuzhiyun /**
141*4882a593Smuzhiyun * i915_syncmap_is_later -- compare against the last know sync point
142*4882a593Smuzhiyun * @root: pointer to the #i915_syncmap
143*4882a593Smuzhiyun * @id: the context id (other timeline) we are synchronising to
144*4882a593Smuzhiyun * @seqno: the sequence number along the other timeline
145*4882a593Smuzhiyun *
146*4882a593Smuzhiyun * If we have already synchronised this @root timeline with another (@id) then
147*4882a593Smuzhiyun * we can omit any repeated or earlier synchronisation requests. If the two
148*4882a593Smuzhiyun * timelines are already coupled, we can also omit the dependency between the
149*4882a593Smuzhiyun * two as that is already known via the timeline.
150*4882a593Smuzhiyun *
151*4882a593Smuzhiyun * Returns true if the two timelines are already synchronised wrt to @seqno,
152*4882a593Smuzhiyun * false if not and the synchronisation must be emitted.
153*4882a593Smuzhiyun */
i915_syncmap_is_later(struct i915_syncmap ** root,u64 id,u32 seqno)154*4882a593Smuzhiyun bool i915_syncmap_is_later(struct i915_syncmap **root, u64 id, u32 seqno)
155*4882a593Smuzhiyun {
156*4882a593Smuzhiyun struct i915_syncmap *p;
157*4882a593Smuzhiyun unsigned int idx;
158*4882a593Smuzhiyun
159*4882a593Smuzhiyun p = *root;
160*4882a593Smuzhiyun if (!p)
161*4882a593Smuzhiyun return false;
162*4882a593Smuzhiyun
163*4882a593Smuzhiyun if (likely(__sync_leaf_prefix(p, id) == p->prefix))
164*4882a593Smuzhiyun goto found;
165*4882a593Smuzhiyun
166*4882a593Smuzhiyun /* First climb the tree back to a parent branch */
167*4882a593Smuzhiyun do {
168*4882a593Smuzhiyun p = p->parent;
169*4882a593Smuzhiyun if (!p)
170*4882a593Smuzhiyun return false;
171*4882a593Smuzhiyun
172*4882a593Smuzhiyun if (__sync_branch_prefix(p, id) == p->prefix)
173*4882a593Smuzhiyun break;
174*4882a593Smuzhiyun } while (1);
175*4882a593Smuzhiyun
176*4882a593Smuzhiyun /* And then descend again until we find our leaf */
177*4882a593Smuzhiyun do {
178*4882a593Smuzhiyun if (!p->height)
179*4882a593Smuzhiyun break;
180*4882a593Smuzhiyun
181*4882a593Smuzhiyun p = __sync_child(p)[__sync_branch_idx(p, id)];
182*4882a593Smuzhiyun if (!p)
183*4882a593Smuzhiyun return false;
184*4882a593Smuzhiyun
185*4882a593Smuzhiyun if (__sync_branch_prefix(p, id) != p->prefix)
186*4882a593Smuzhiyun return false;
187*4882a593Smuzhiyun } while (1);
188*4882a593Smuzhiyun
189*4882a593Smuzhiyun *root = p;
190*4882a593Smuzhiyun found:
191*4882a593Smuzhiyun idx = __sync_leaf_idx(p, id);
192*4882a593Smuzhiyun if (!(p->bitmap & BIT(idx)))
193*4882a593Smuzhiyun return false;
194*4882a593Smuzhiyun
195*4882a593Smuzhiyun return seqno_later(__sync_seqno(p)[idx], seqno);
196*4882a593Smuzhiyun }
197*4882a593Smuzhiyun
198*4882a593Smuzhiyun static struct i915_syncmap *
__sync_alloc_leaf(struct i915_syncmap * parent,u64 id)199*4882a593Smuzhiyun __sync_alloc_leaf(struct i915_syncmap *parent, u64 id)
200*4882a593Smuzhiyun {
201*4882a593Smuzhiyun struct i915_syncmap *p;
202*4882a593Smuzhiyun
203*4882a593Smuzhiyun p = kmalloc(sizeof(*p) + KSYNCMAP * sizeof(u32), GFP_KERNEL);
204*4882a593Smuzhiyun if (unlikely(!p))
205*4882a593Smuzhiyun return NULL;
206*4882a593Smuzhiyun
207*4882a593Smuzhiyun p->parent = parent;
208*4882a593Smuzhiyun p->height = 0;
209*4882a593Smuzhiyun p->bitmap = 0;
210*4882a593Smuzhiyun p->prefix = __sync_leaf_prefix(p, id);
211*4882a593Smuzhiyun return p;
212*4882a593Smuzhiyun }
213*4882a593Smuzhiyun
/* Record @seqno for @id in leaf @p, marking the slot as valid. */
static inline void __sync_set_seqno(struct i915_syncmap *p, u64 id, u32 seqno)
{
	unsigned int idx = __sync_leaf_idx(p, id);

	p->bitmap |= BIT(idx);
	__sync_seqno(p)[idx] = seqno;
}
221*4882a593Smuzhiyun
/* Install @child into slot @idx of branch @p, marking the slot as valid. */
static inline void __sync_set_child(struct i915_syncmap *p,
				    unsigned int idx,
				    struct i915_syncmap *child)
{
	p->bitmap |= BIT(idx);
	__sync_child(p)[idx] = child;
}
229*4882a593Smuzhiyun
/*
 * __sync_set() - slow path of i915_syncmap_set()
 *
 * Called when the last-used leaf cached in *root does not cover @id (or the
 * tree is empty). Climbs from the cached layer to a branch whose prefix
 * covers @id, then descends again, inserting a join and/or a new leaf as
 * required, and finally records @seqno for @id. On success, *root is updated
 * to the leaf so the next lookup on a neighbouring id hits the cache.
 *
 * Returns 0 on success, or -ENOMEM if a layer could not be allocated.
 */
static noinline int __sync_set(struct i915_syncmap **root, u64 id, u32 seqno)
{
	struct i915_syncmap *p = *root;
	unsigned int idx;

	if (!p) {
		/* Empty tree: a single leaf suffices. */
		p = __sync_alloc_leaf(NULL, id);
		if (unlikely(!p))
			return -ENOMEM;

		goto found;
	}

	/* Caller handled the likely cached case */
	GEM_BUG_ON(__sync_leaf_prefix(p, id) == p->prefix);

	/* Climb back up the tree until we find a common prefix */
	do {
		if (!p->parent)
			break;

		p = p->parent;

		if (__sync_branch_prefix(p, id) == p->prefix)
			break;
	} while (1);

	/*
	 * No shortcut, we have to descend the tree to find the right layer
	 * containing this fence.
	 *
	 * Each layer in the tree holds 16 (KSYNCMAP) pointers, either fences
	 * or lower layers. Leaf nodes (height = 0) contain the fences, all
	 * other nodes (height > 0) are internal layers that point to a lower
	 * node. Each internal layer has at least 2 descendents.
	 *
	 * Starting at the top, we check whether the current prefix matches. If
	 * it doesn't, we have gone past our target and need to insert a join
	 * into the tree, and a new leaf node for the target as a descendent
	 * of the join, as well as the original layer.
	 *
	 * The matching prefix means we are still following the right branch
	 * of the tree. If it has height 0, we have found our leaf and just
	 * need to replace the fence slot with ourselves. If the height is
	 * not zero, our slot contains the next layer in the tree (unless
	 * it is empty, in which case we can add ourselves as a new leaf).
	 * As we descend the tree the prefix grows (and height decreases).
	 */
	do {
		struct i915_syncmap *next;

		if (__sync_branch_prefix(p, id) != p->prefix) {
			unsigned int above;

			/*
			 * Insert a join above the current layer. Note
			 * sizeof(next) is the size of a pointer, matching the
			 * child[KSYNCMAP] array that trails a branch; kzalloc
			 * leaves bitmap clear and all child slots NULL.
			 */
			next = kzalloc(sizeof(*next) + KSYNCMAP * sizeof(next),
				       GFP_KERNEL);
			if (unlikely(!next))
				return -ENOMEM;

			/* Compute the height at which these two diverge */
			above = fls64(__sync_branch_prefix(p, id) ^ p->prefix);
			above = round_up(above, SHIFT);
			next->height = above + p->height;
			next->prefix = __sync_branch_prefix(next, id);

			/* Insert the join into the parent */
			if (p->parent) {
				idx = __sync_branch_idx(p->parent, id);
				__sync_child(p->parent)[idx] = next;
				GEM_BUG_ON(!(p->parent->bitmap & BIT(idx)));
			}
			next->parent = p->parent;

			/* Compute the idx of the other branch, not our id! */
			idx = p->prefix >> (above - SHIFT) & MASK;
			__sync_set_child(next, idx, p);
			p->parent = next;

			/* Ascend to the join */
			p = next;
		} else {
			if (!p->height)
				break;
		}

		/* Descend into the next layer */
		GEM_BUG_ON(!p->height);
		idx = __sync_branch_idx(p, id);
		next = __sync_child(p)[idx];
		if (!next) {
			/* Vacant slot: attach a fresh leaf for @id here. */
			next = __sync_alloc_leaf(p, id);
			if (unlikely(!next))
				return -ENOMEM;

			__sync_set_child(p, idx, next);
			p = next;
			break;
		}

		p = next;
	} while (1);

found:
	GEM_BUG_ON(p->prefix != __sync_leaf_prefix(p, id));
	__sync_set_seqno(p, id, seqno);
	*root = p;
	return 0;
}
339*4882a593Smuzhiyun
340*4882a593Smuzhiyun /**
341*4882a593Smuzhiyun * i915_syncmap_set -- mark the most recent syncpoint between contexts
342*4882a593Smuzhiyun * @root: pointer to the #i915_syncmap
343*4882a593Smuzhiyun * @id: the context id (other timeline) we have synchronised to
344*4882a593Smuzhiyun * @seqno: the sequence number along the other timeline
345*4882a593Smuzhiyun *
346*4882a593Smuzhiyun * When we synchronise this @root timeline with another (@id), we also know
347*4882a593Smuzhiyun * that we have synchronized with all previous seqno along that timeline. If
348*4882a593Smuzhiyun * we then have a request to synchronise with the same seqno or older, we can
349*4882a593Smuzhiyun * omit it, see i915_syncmap_is_later()
350*4882a593Smuzhiyun *
351*4882a593Smuzhiyun * Returns 0 on success, or a negative error code.
352*4882a593Smuzhiyun */
i915_syncmap_set(struct i915_syncmap ** root,u64 id,u32 seqno)353*4882a593Smuzhiyun int i915_syncmap_set(struct i915_syncmap **root, u64 id, u32 seqno)
354*4882a593Smuzhiyun {
355*4882a593Smuzhiyun struct i915_syncmap *p = *root;
356*4882a593Smuzhiyun
357*4882a593Smuzhiyun /*
358*4882a593Smuzhiyun * We expect to be called in sequence following is_later(id), which
359*4882a593Smuzhiyun * should have preloaded the root for us.
360*4882a593Smuzhiyun */
361*4882a593Smuzhiyun if (likely(p && __sync_leaf_prefix(p, id) == p->prefix)) {
362*4882a593Smuzhiyun __sync_set_seqno(p, id, seqno);
363*4882a593Smuzhiyun return 0;
364*4882a593Smuzhiyun }
365*4882a593Smuzhiyun
366*4882a593Smuzhiyun return __sync_set(root, id, seqno);
367*4882a593Smuzhiyun }
368*4882a593Smuzhiyun
__sync_free(struct i915_syncmap * p)369*4882a593Smuzhiyun static void __sync_free(struct i915_syncmap *p)
370*4882a593Smuzhiyun {
371*4882a593Smuzhiyun if (p->height) {
372*4882a593Smuzhiyun unsigned int i;
373*4882a593Smuzhiyun
374*4882a593Smuzhiyun while ((i = ffs(p->bitmap))) {
375*4882a593Smuzhiyun p->bitmap &= ~0u << i;
376*4882a593Smuzhiyun __sync_free(__sync_child(p)[i - 1]);
377*4882a593Smuzhiyun }
378*4882a593Smuzhiyun }
379*4882a593Smuzhiyun
380*4882a593Smuzhiyun kfree(p);
381*4882a593Smuzhiyun }
382*4882a593Smuzhiyun
383*4882a593Smuzhiyun /**
384*4882a593Smuzhiyun * i915_syncmap_free -- free all memory associated with the syncmap
385*4882a593Smuzhiyun * @root: pointer to the #i915_syncmap
386*4882a593Smuzhiyun *
387*4882a593Smuzhiyun * Either when the timeline is to be freed and we no longer need the sync
388*4882a593Smuzhiyun * point tracking, or when the fences are all known to be signaled and the
389*4882a593Smuzhiyun * sync point tracking is redundant, we can free the #i915_syncmap to recover
390*4882a593Smuzhiyun * its allocations.
391*4882a593Smuzhiyun *
392*4882a593Smuzhiyun * Will reinitialise the @root pointer so that the #i915_syncmap is ready for
393*4882a593Smuzhiyun * reuse.
394*4882a593Smuzhiyun */
i915_syncmap_free(struct i915_syncmap ** root)395*4882a593Smuzhiyun void i915_syncmap_free(struct i915_syncmap **root)
396*4882a593Smuzhiyun {
397*4882a593Smuzhiyun struct i915_syncmap *p;
398*4882a593Smuzhiyun
399*4882a593Smuzhiyun p = *root;
400*4882a593Smuzhiyun if (!p)
401*4882a593Smuzhiyun return;
402*4882a593Smuzhiyun
403*4882a593Smuzhiyun while (p->parent)
404*4882a593Smuzhiyun p = p->parent;
405*4882a593Smuzhiyun
406*4882a593Smuzhiyun __sync_free(p);
407*4882a593Smuzhiyun *root = NULL;
408*4882a593Smuzhiyun }
409*4882a593Smuzhiyun
410*4882a593Smuzhiyun #if IS_ENABLED(CONFIG_DRM_I915_SELFTEST)
411*4882a593Smuzhiyun #include "selftests/i915_syncmap.c"
412*4882a593Smuzhiyun #endif
413