/* SPDX-License-Identifier: GPL-2.0 */
/*
 * The contents of this file are private to DMA engine drivers and are not
 * part of the API to be used by DMA engine users.
 */
#ifndef DMAENGINE_H
#define DMAENGINE_H

#include <linux/bug.h>
#include <linux/dmaengine.h>

/**
 * dma_cookie_init - initialize the cookies for a DMA channel
 * @chan: dma channel to initialize
 */
static inline void dma_cookie_init(struct dma_chan *chan)
{
	chan->cookie = DMA_MIN_COOKIE;
	chan->completed_cookie = DMA_MIN_COOKIE;
}

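/*
 * Example: a driver normally calls dma_cookie_init() once per channel while
 * wiring the channel up in probe, before dma_async_device_register().  A
 * minimal sketch; the foo_* names are hypothetical:
 *
 *	struct foo_chan *fc = &fdev->chans[i];
 *
 *	fc->chan.device = &fdev->ddev;
 *	dma_cookie_init(&fc->chan);
 *	list_add_tail(&fc->chan.device_node, &fdev->ddev.channels);
 */
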
/**
 * dma_cookie_assign - assign a DMA engine cookie to the descriptor
 * @tx: descriptor needing cookie
 *
 * Assign a unique non-zero per-channel cookie to the descriptor.
 * Note: caller is expected to hold a lock to prevent concurrency.
 */
static inline dma_cookie_t dma_cookie_assign(struct dma_async_tx_descriptor *tx)
{
	struct dma_chan *chan = tx->chan;
	dma_cookie_t cookie;

	cookie = chan->cookie + 1;
	if (cookie < DMA_MIN_COOKIE)
		cookie = DMA_MIN_COOKIE;
	tx->cookie = chan->cookie = cookie;

	return cookie;
}

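/*
 * Example: dma_cookie_assign() is meant to be called from a driver's
 * ->tx_submit() hook, under the channel lock.  A minimal sketch; the
 * foo_* names are hypothetical:
 *
 *	static dma_cookie_t foo_tx_submit(struct dma_async_tx_descriptor *tx)
 *	{
 *		struct foo_chan *fc = to_foo_chan(tx->chan);
 *		struct foo_desc *fd = to_foo_desc(tx);
 *		unsigned long flags;
 *		dma_cookie_t cookie;
 *
 *		spin_lock_irqsave(&fc->lock, flags);
 *		cookie = dma_cookie_assign(tx);
 *		list_add_tail(&fd->node, &fc->pending);
 *		spin_unlock_irqrestore(&fc->lock, flags);
 *
 *		return cookie;
 *	}
 */
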
/**
 * dma_cookie_complete - complete a descriptor
 * @tx: descriptor to complete
 *
 * Mark this descriptor complete by updating the channel's completed
 * cookie marker.  Zero the descriptor's cookie to prevent accidental
 * repeated completions.
 *
 * Note: caller is expected to hold a lock to prevent concurrency.
 */
static inline void dma_cookie_complete(struct dma_async_tx_descriptor *tx)
{
	BUG_ON(tx->cookie < DMA_MIN_COOKIE);
	tx->chan->completed_cookie = tx->cookie;
	tx->cookie = 0;
}

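/*
 * Example: a completion handler typically calls dma_cookie_complete() under
 * the same lock that ->tx_submit() takes.  A minimal sketch; the foo_*
 * names are hypothetical:
 *
 *	spin_lock_irqsave(&fc->lock, flags);
 *	fd = list_first_entry(&fc->active, struct foo_desc, node);
 *	list_del(&fd->node);
 *	dma_cookie_complete(&fd->tx);
 *	spin_unlock_irqrestore(&fc->lock, flags);
 *
 * with the client callback then invoked outside the lock (see the
 * dmaengine_desc_callback helpers below).
 */
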
/**
 * dma_cookie_status - report cookie status
 * @chan: dma channel
 * @cookie: cookie we are interested in
 * @state: dma_tx_state structure to return last/used cookies
 *
 * Report the status of the cookie, filling in the state structure if
 * non-NULL.  No locking is required.
 */
static inline enum dma_status dma_cookie_status(struct dma_chan *chan,
	dma_cookie_t cookie, struct dma_tx_state *state)
{
	dma_cookie_t used, complete;

	used = chan->cookie;
	complete = chan->completed_cookie;
	barrier();
	if (state) {
		state->last = complete;
		state->used = used;
		state->residue = 0;
		state->in_flight_bytes = 0;
	}
	return dma_async_is_complete(cookie, complete, used);
}

static inline void dma_set_residue(struct dma_tx_state *state, u32 residue)
{
	if (state)
		state->residue = residue;
}

static inline void dma_set_in_flight_bytes(struct dma_tx_state *state,
					   u32 in_flight_bytes)
{
	if (state)
		state->in_flight_bytes = in_flight_bytes;
}

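/*
 * Example: a typical ->device_tx_status() implementation starts from
 * dma_cookie_status() and only computes the residue for transactions that
 * are still in flight.  A minimal sketch; the foo_* names are hypothetical:
 *
 *	static enum dma_status foo_tx_status(struct dma_chan *chan,
 *					     dma_cookie_t cookie,
 *					     struct dma_tx_state *state)
 *	{
 *		struct foo_chan *fc = to_foo_chan(chan);
 *		enum dma_status status;
 *		unsigned long flags;
 *
 *		status = dma_cookie_status(chan, cookie, state);
 *		if (status == DMA_COMPLETE || !state)
 *			return status;
 *
 *		spin_lock_irqsave(&fc->lock, flags);
 *		dma_set_residue(state, foo_get_residue(fc, cookie));
 *		spin_unlock_irqrestore(&fc->lock, flags);
 *
 *		return status;
 *	}
 */
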
struct dmaengine_desc_callback {
	dma_async_tx_callback callback;
	dma_async_tx_callback_result callback_result;
	void *callback_param;
};

/**
 * dmaengine_desc_get_callback - get the passed in callback function
 * @tx: tx descriptor
 * @cb: temp struct to hold the callback info
 *
 * Fill the passed in cb struct with what's available in the passed in
 * tx descriptor struct.
 * No locking is required.
 */
static inline void
dmaengine_desc_get_callback(struct dma_async_tx_descriptor *tx,
			    struct dmaengine_desc_callback *cb)
{
	cb->callback = tx->callback;
	cb->callback_result = tx->callback_result;
	cb->callback_param = tx->callback_param;
}

/**
 * dmaengine_desc_callback_invoke - call the callback function in cb struct
 * @cb: temp struct that is holding the callback info
 * @result: transaction result
 *
 * Call the callback function provided in the cb struct with the parameter
 * in the cb struct.
 * Locking is dependent on the driver.
 */
static inline void
dmaengine_desc_callback_invoke(struct dmaengine_desc_callback *cb,
			       const struct dmaengine_result *result)
{
	struct dmaengine_result dummy_result = {
		.result = DMA_TRANS_NOERROR,
		.residue = 0
	};

	if (cb->callback_result) {
		if (!result)
			result = &dummy_result;
		cb->callback_result(cb->callback_param, result);
	} else if (cb->callback) {
		cb->callback(cb->callback_param);
	}
}

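/*
 * Example: the get/invoke split lets a driver snapshot the callback while
 * holding its lock and run it after the lock is dropped, since a client
 * callback may resubmit descriptors.  A minimal sketch; the foo_* names
 * are hypothetical:
 *
 *	struct dmaengine_desc_callback cb;
 *	struct dmaengine_result res;
 *
 *	spin_lock_irqsave(&fc->lock, flags);
 *	dma_cookie_complete(&fd->tx);
 *	dmaengine_desc_get_callback(&fd->tx, &cb);
 *	spin_unlock_irqrestore(&fc->lock, flags);
 *
 *	res.result = DMA_TRANS_NOERROR;
 *	res.residue = 0;
 *	dmaengine_desc_callback_invoke(&cb, &res);
 */
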
/**
 * dmaengine_desc_get_callback_invoke - get the callback in tx descriptor and
 * 					then immediately call the callback.
 * @tx: dma async tx descriptor
 * @result: transaction result
 *
 * Call dmaengine_desc_get_callback() and dmaengine_desc_callback_invoke()
 * in a single function since no work is necessary in between for the driver.
 * Locking is dependent on the driver.
 */
static inline void
dmaengine_desc_get_callback_invoke(struct dma_async_tx_descriptor *tx,
				   const struct dmaengine_result *result)
{
	struct dmaengine_desc_callback cb;

	dmaengine_desc_get_callback(tx, &cb);
	dmaengine_desc_callback_invoke(&cb, result);
}

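/*
 * Example: the combined form suits cases where nothing needs to happen
 * between fetching and running the callback, such as signalling each
 * period of a cyclic transfer from the completion path.  A minimal sketch;
 * the foo_* names are hypothetical:
 *
 *	if (fc->cyclic_desc)
 *		dmaengine_desc_get_callback_invoke(&fc->cyclic_desc->tx, NULL);
 */
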
/**
 * dmaengine_desc_callback_valid - verify the callback is valid in cb
 * @cb: callback info struct
 *
 * Return true if either callback function in @cb is set.
 * No locking is required.
 */
static inline bool
dmaengine_desc_callback_valid(struct dmaengine_desc_callback *cb)
{
	return cb->callback || cb->callback_result;
}

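/*
 * Example: a driver can use this to skip completion work when the client
 * did not ask for a notification.  A minimal sketch; the foo_* names are
 * hypothetical:
 *
 *	dmaengine_desc_get_callback(&fd->tx, &cb);
 *	if (dmaengine_desc_callback_valid(&cb))
 *		tasklet_schedule(&fc->task);
 */
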
struct dma_chan *dma_get_slave_channel(struct dma_chan *chan);
struct dma_chan *dma_get_any_slave_channel(struct dma_device *device);

#ifdef CONFIG_DEBUG_FS
#include <linux/debugfs.h>

static inline struct dentry *
dmaengine_get_debugfs_root(struct dma_device *dma_dev)
{
	return dma_dev->dbg_dev_root;
}
#else
struct dentry;
static inline struct dentry *
dmaengine_get_debugfs_root(struct dma_device *dma_dev)
{
	return NULL;
}
#endif /* CONFIG_DEBUG_FS */

#endif /* DMAENGINE_H */