/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * seqno-fence, using a dma-buf to synchronize fencing
 *
 * Copyright (C) 2012 Texas Instruments
 * Copyright (C) 2012 Canonical Ltd
 * Authors:
 *	Rob Clark <robdclark@gmail.com>
 *	Maarten Lankhorst <maarten.lankhorst@canonical.com>
 */

#ifndef __LINUX_SEQNO_FENCE_H
#define __LINUX_SEQNO_FENCE_H

#include <linux/dma-fence.h>
#include <linux/dma-buf.h>

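/**
 * enum seqno_fence_condition - condition the hardware waits on
 * @SEQNO_FENCE_WAIT_GEQUAL: signal once the value at seqno_ofs in the
 *	sync_buf is greater than or equal to the fence's seqno, using the
 *	wraparound-safe signed comparison (s32)(val - seqno) >= 0
 * @SEQNO_FENCE_WAIT_NONZERO: signal once the value at seqno_ofs in the
 *	sync_buf becomes nonzero
 */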
enum seqno_fence_condition {
	SEQNO_FENCE_WAIT_GEQUAL,
	SEQNO_FENCE_WAIT_NONZERO
};

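/**
 * struct seqno_fence - dma_fence completed by a memory location in a dma-buf
 * @base: embedded dma_fence
 * @ops: driver-specific dma_fence_ops, wrapped by seqno_fence_ops
 * @sync_buf: buffer containing the memory location to signal on
 * @seqno_ofs: offset of the sequence number within @sync_buf
 * @condition: fence wait condition, see &enum seqno_fence_condition
 */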
struct seqno_fence {
	struct dma_fence base;

	const struct dma_fence_ops *ops;
	struct dma_buf *sync_buf;
	uint32_t seqno_ofs;
	enum seqno_fence_condition condition;
};

extern const struct dma_fence_ops seqno_fence_ops;

/**
 * to_seqno_fence - cast a fence to a seqno_fence
 * @fence: fence to cast to a seqno_fence
 *
 * Returns NULL if the fence is not a seqno_fence, or the seqno_fence
 * otherwise.
 */
static inline struct seqno_fence *
to_seqno_fence(struct dma_fence *fence)
{
	if (fence->ops != &seqno_fence_ops)
		return NULL;
	return container_of(fence, struct seqno_fence, base);
}
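
/*
 * Example (an illustrative sketch, not part of this header; the helper
 * name is hypothetical): use to_seqno_fence() to check the fence type
 * before touching seqno_fence-specific fields such as sync_buf.
 *
 *	static struct dma_buf *get_fence_sync_buf(struct dma_fence *fence)
 *	{
 *		struct seqno_fence *sf = to_seqno_fence(fence);
 *
 *		return sf ? sf->sync_buf : NULL;
 *	}
 */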

/**
 * seqno_fence_init - initialize a seqno fence
 * @fence: seqno_fence to initialize
 * @lock: pointer to spinlock to use for fence
 * @sync_buf: buffer containing the memory location to signal on
 * @context: the execution context this fence is a part of
 * @seqno_ofs: the offset within @sync_buf
 * @seqno: the sequence # to signal on
 * @cond: fence wait condition
 * @ops: the fence_ops for operations on this seqno fence
 *
 * This function initializes a struct seqno_fence with the passed parameters
 * and takes a reference on @sync_buf, which is released on fence destruction.
 *
 * A seqno_fence is a dma_fence which can complete in software when
 * enable_signaling is called, but it also completes when
 * (s32)((sync_buf)[seqno_ofs] - seqno) >= 0 is true.
 *
 * The seqno_fence holds a refcount on @sync_buf until it is destroyed, but
 * the actual lifetime of @sync_buf may be longer if one of the callers takes
 * a reference to it.
 *
 * Certain hardware has instructions to insert this type of wait condition
 * into the command stream, so no intervention from software is needed.
 * This type of fence can be destroyed before it has completed; however, a
 * reference on the @sync_buf dma-buf can be taken to keep the memory
 * location valid. Re-using the same dma-buf for @sync_buf is encouraged,
 * since mapping or unmapping it in the device's VM can be expensive.
 *
 * It is recommended that creators of a seqno_fence call dma_fence_signal()
 * before destruction. This prevents possible issues from wraparound between
 * the time of issue and the time of check, since users can check
 * dma_fence_is_signaled() before submitting instructions for the hardware
 * to wait on the fence. However, when ops.enable_signaling is not called,
 * this doesn't have to happen as soon as possible, just before there's any
 * real danger of seqno wraparound.
 */
static inline void
seqno_fence_init(struct seqno_fence *fence, spinlock_t *lock,
		 struct dma_buf *sync_buf, uint32_t context,
		 uint32_t seqno_ofs, uint32_t seqno,
		 enum seqno_fence_condition cond,
		 const struct dma_fence_ops *ops)
{
	BUG_ON(!fence || !sync_buf || !ops);
	BUG_ON(!ops->wait || !ops->enable_signaling ||
	       !ops->get_driver_name || !ops->get_timeline_name);

	/*
	 * ops is used in dma_fence_init for get_driver_name, so it needs to
	 * be initialized first.
	 */
	fence->ops = ops;
	dma_fence_init(&fence->base, &seqno_fence_ops, lock, context, seqno);
	get_dma_buf(sync_buf);
	fence->sync_buf = sync_buf;
	fence->seqno_ofs = seqno_ofs;
	fence->condition = cond;
}
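
/*
 * Example (an illustrative sketch; my_fence_ops, my_ctx and SEQNO_OFFSET
 * are hypothetical driver-side names): a driver would typically allocate
 * the fence, then initialize it against the dma-buf its hardware writes
 * completed sequence numbers into.
 *
 *	struct seqno_fence *sf = kzalloc(sizeof(*sf), GFP_KERNEL);
 *
 *	if (!sf)
 *		return -ENOMEM;
 *
 *	seqno_fence_init(sf, &my_ctx->fence_lock, my_ctx->sync_buf,
 *			 my_ctx->fence_context, SEQNO_OFFSET,
 *			 ++my_ctx->seqno, SEQNO_FENCE_WAIT_GEQUAL,
 *			 &my_fence_ops);
 *
 * The fence then completes once the hardware writes a value v at
 * SEQNO_OFFSET with (s32)(v - seqno) >= 0, or when software calls
 * dma_fence_signal(&sf->base).
 */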

#endif /* __LINUX_SEQNO_FENCE_H */