xref: /OK3568_Linux_fs/kernel/include/drm/drm_sync_helper.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/*
 * drm_sync_helper.h: software fence and helper functions for fences and
 * reservations used for dma buffer access synchronization between drivers.
 *
 * Copyright 2014 Google, Inc.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */
16*4882a593Smuzhiyun 
17*4882a593Smuzhiyun #ifndef _DRM_SYNC_HELPER_H_
18*4882a593Smuzhiyun #define _DRM_SYNC_HELPER_H_
19*4882a593Smuzhiyun 
20*4882a593Smuzhiyun #include <linux/fence.h>
21*4882a593Smuzhiyun #include <linux/reservation.h>
22*4882a593Smuzhiyun #include <linux/atomic.h>
23*4882a593Smuzhiyun #include <linux/workqueue.h>
24*4882a593Smuzhiyun 
/**
 * Create a software fence
 * @context: execution context the fence belongs to
 * @seqno: sequence number of this fence within @context
 */
struct fence *drm_sw_fence_new(unsigned int context,
			       unsigned seqno);
32*4882a593Smuzhiyun 
33*4882a593Smuzhiyun /**
34*4882a593Smuzhiyun  * Signal and decrease reference count for a fence if it exists
35*4882a593Smuzhiyun  * @fence: fence to signal
36*4882a593Smuzhiyun  *
37*4882a593Smuzhiyun  * Utility function called when owner access to object associated with fence is
38*4882a593Smuzhiyun  * finished (e.g. GPU done with rendering).
39*4882a593Smuzhiyun  */
drm_fence_signal_and_put(struct fence ** fence)40*4882a593Smuzhiyun static inline void drm_fence_signal_and_put(struct fence **fence)
41*4882a593Smuzhiyun {
42*4882a593Smuzhiyun 	if (*fence) {
43*4882a593Smuzhiyun 		fence_signal(*fence);
44*4882a593Smuzhiyun 		fence_put(*fence);
45*4882a593Smuzhiyun 		*fence = NULL;
46*4882a593Smuzhiyun 	}
47*4882a593Smuzhiyun }
48*4882a593Smuzhiyun 
49*4882a593Smuzhiyun struct drm_reservation_cb;
50*4882a593Smuzhiyun 
51*4882a593Smuzhiyun struct drm_reservation_fence_cb {
52*4882a593Smuzhiyun 	struct fence_cb base;
53*4882a593Smuzhiyun 	struct drm_reservation_cb *parent;
54*4882a593Smuzhiyun 	struct fence *fence;
55*4882a593Smuzhiyun };
56*4882a593Smuzhiyun 
/**
 * Callback executed once all fences in the reservation callback have
 * signaled
 * @rcb: reservation callback structure
 * @context: opaque pointer provided by the user at init time
 */
typedef void (*drm_reservation_cb_func_t)(struct drm_reservation_cb *rcb,
					  void *context);
64*4882a593Smuzhiyun 
65*4882a593Smuzhiyun /**
66*4882a593Smuzhiyun  * Reservation callback structure
67*4882a593Smuzhiyun  * @work: work context in which func is executed
68*4882a593Smuzhiyun  * @fence_cbs: fence callbacks array
69*4882a593Smuzhiyun  * @num_fence_cbs: number of fence callbacks
70*4882a593Smuzhiyun  * @count: count of signaled fences, when it drops to 0 func is called
71*4882a593Smuzhiyun  * @func: callback to execute when all fences are signaled
72*4882a593Smuzhiyun  * @context: context provided by user during initialization
73*4882a593Smuzhiyun  *
74*4882a593Smuzhiyun  * It is safe and expected that func will destroy this structure before
75*4882a593Smuzhiyun  * returning.
76*4882a593Smuzhiyun  */
77*4882a593Smuzhiyun struct drm_reservation_cb {
78*4882a593Smuzhiyun 	struct work_struct work;
79*4882a593Smuzhiyun 	struct drm_reservation_fence_cb **fence_cbs;
80*4882a593Smuzhiyun 	unsigned num_fence_cbs;
81*4882a593Smuzhiyun 	atomic_t count;
82*4882a593Smuzhiyun 	void *context;
83*4882a593Smuzhiyun 	drm_reservation_cb_func_t func;
84*4882a593Smuzhiyun };
85*4882a593Smuzhiyun 
86*4882a593Smuzhiyun /**
87*4882a593Smuzhiyun  * Initialize reservation callback
88*4882a593Smuzhiyun  * @rcb: reservation callback structure to initialize
89*4882a593Smuzhiyun  * @func: function to call when all fences are signaled
90*4882a593Smuzhiyun  * @context: parameter to call func with
91*4882a593Smuzhiyun  */
92*4882a593Smuzhiyun void drm_reservation_cb_init(struct drm_reservation_cb *rcb,
93*4882a593Smuzhiyun 			     drm_reservation_cb_func_t func,
94*4882a593Smuzhiyun 			     void *context);
95*4882a593Smuzhiyun 
96*4882a593Smuzhiyun /**
97*4882a593Smuzhiyun  * Add fences from reservation object to callback
98*4882a593Smuzhiyun  * @rcb: reservation callback structure
99*4882a593Smuzhiyun  * @resv: reservation object
100*4882a593Smuzhiyun  * @exclusive: (for exclusive wait) when true add all fences, otherwise only
101*4882a593Smuzhiyun  *    exclusive fence
102*4882a593Smuzhiyun  */
103*4882a593Smuzhiyun int drm_reservation_cb_add(struct drm_reservation_cb *rcb,
104*4882a593Smuzhiyun 			   struct reservation_object *resv,
105*4882a593Smuzhiyun 			   bool exclusive);
106*4882a593Smuzhiyun 
/**
 * Finish adding fences
 * @rcb: reservation callback structure
 *
 * Triggers the callback worker if all fences had already signaled
 * beforehand.
 */
void drm_reservation_cb_done(struct drm_reservation_cb *rcb);
114*4882a593Smuzhiyun 
/**
 * Clean up a reservation callback structure
 * @rcb: reservation callback structure
 *
 * May be called to cancel a primed reservation callback.
 */
void drm_reservation_cb_fini(struct drm_reservation_cb *rcb);
122*4882a593Smuzhiyun 
123*4882a593Smuzhiyun /**
124*4882a593Smuzhiyun  * Add reservation to array of reservations
125*4882a593Smuzhiyun  * @resv: reservation to add
126*4882a593Smuzhiyun  * @resvs: array of reservations
127*4882a593Smuzhiyun  * @excl_resvs_bitmap: bitmap for exclusive reservations
128*4882a593Smuzhiyun  * @num_resvs: number of reservations in array
129*4882a593Smuzhiyun  * @exclusive: bool to store in excl_resvs_bitmap
130*4882a593Smuzhiyun  */
131*4882a593Smuzhiyun void
132*4882a593Smuzhiyun drm_add_reservation(struct reservation_object *resv,
133*4882a593Smuzhiyun 			struct reservation_object **resvs,
134*4882a593Smuzhiyun 			unsigned long *excl_resvs_bitmap,
135*4882a593Smuzhiyun 			unsigned int *num_resvs, bool exclusive);
136*4882a593Smuzhiyun 
/**
 * Acquire the ww_mutex lock on every reservation in the array
 * @resvs: array of reservations
 * @num_resvs: number of reservations in the array
 * @ctx: ww mutex acquire context
 */
int drm_lock_reservations(struct reservation_object **resvs,
			  unsigned int num_resvs, struct ww_acquire_ctx *ctx);
145*4882a593Smuzhiyun 
/**
 * Release the ww_mutex lock on every reservation in the array
 * @resvs: array of reservations
 * @num_resvs: number of reservations in the array
 * @ctx: ww mutex acquire context
 */
void drm_unlock_reservations(struct reservation_object **resvs,
			     unsigned int num_resvs,
			     struct ww_acquire_ctx *ctx);
155*4882a593Smuzhiyun 
156*4882a593Smuzhiyun #endif
157