/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Virtual DMA channel support for DMAengine
 *
 * Copyright (C) 2012 Russell King
 */
#ifndef VIRT_DMA_H
#define VIRT_DMA_H

#include <linux/dmaengine.h>
#include <linux/interrupt.h>

#include "dmaengine.h"

struct virt_dma_desc {
	struct dma_async_tx_descriptor tx;
	struct dmaengine_result tx_result;
	/* protected by vc.lock */
	struct list_head node;
};

struct virt_dma_chan {
	struct dma_chan chan;
	struct tasklet_struct task;
	void (*desc_free)(struct virt_dma_desc *);

	spinlock_t lock;

	/* protected by vc.lock */
	struct list_head desc_allocated;
	struct list_head desc_submitted;
	struct list_head desc_issued;
	struct list_head desc_completed;
	struct list_head desc_terminated;

	struct virt_dma_desc *cyclic;
};
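/*
 * Illustrative sketch (not part of this header): drivers typically embed
 * these structures in their own channel and descriptor types so that
 * container_of() can recover the driver-private state. The "foo_" names
 * and the "cur" member (tracking the in-flight descriptor) are
 * hypothetical.
 *
 *	struct foo_desc {
 *		struct virt_dma_desc vd;
 *		dma_addr_t src;
 *		dma_addr_t dst;
 *		size_t len;
 *	};
 *
 *	struct foo_chan {
 *		struct virt_dma_chan vc;
 *		struct foo_desc *cur;
 *		void __iomem *base;
 *	};
 *
 *	static inline struct foo_chan *to_foo_chan(struct dma_chan *c)
 *	{
 *		return container_of(to_virt_chan(c), struct foo_chan, vc);
 *	}
 */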

static inline struct virt_dma_chan *to_virt_chan(struct dma_chan *chan)
{
	return container_of(chan, struct virt_dma_chan, chan);
}

void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head);
void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev);
struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *, dma_cookie_t);
extern dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *);
extern int vchan_tx_desc_free(struct dma_async_tx_descriptor *);

/**
 * vchan_tx_prep - prepare a descriptor
 * @vc: virtual channel allocating this descriptor
 * @vd: virtual descriptor to prepare
 * @tx_flags: flags argument passed in to prepare function
 */
static inline struct dma_async_tx_descriptor *vchan_tx_prep(struct virt_dma_chan *vc,
	struct virt_dma_desc *vd, unsigned long tx_flags)
{
	unsigned long flags;

	dma_async_tx_descriptor_init(&vd->tx, &vc->chan);
	vd->tx.flags = tx_flags;
	vd->tx.tx_submit = vchan_tx_submit;
	vd->tx.desc_free = vchan_tx_desc_free;

	vd->tx_result.result = DMA_TRANS_NOERROR;
	vd->tx_result.residue = 0;

	spin_lock_irqsave(&vc->lock, flags);
	list_add_tail(&vd->node, &vc->desc_allocated);
	spin_unlock_irqrestore(&vc->lock, flags);

	return &vd->tx;
}
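/*
 * Illustrative sketch (not part of this header): a driver's prep callback
 * usually allocates its own descriptor, fills in the hardware-specific
 * fields and hands it to vchan_tx_prep(), which places it on
 * desc_allocated. The "foo_" names are hypothetical.
 *
 *	static struct dma_async_tx_descriptor *
 *	foo_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
 *			    dma_addr_t src, size_t len, unsigned long flags)
 *	{
 *		struct foo_chan *fc = to_foo_chan(chan);
 *		struct foo_desc *d = kzalloc(sizeof(*d), GFP_NOWAIT);
 *
 *		if (!d)
 *			return NULL;
 *
 *		d->src = src;
 *		d->dst = dst;
 *		d->len = len;
 *
 *		return vchan_tx_prep(&fc->vc, &d->vd, flags);
 *	}
 */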

/**
 * vchan_issue_pending - move submitted descriptors to issued list
 * @vc: virtual channel to update
 *
 * vc.lock must be held by caller
 */
static inline bool vchan_issue_pending(struct virt_dma_chan *vc)
{
	list_splice_tail_init(&vc->desc_submitted, &vc->desc_issued);
	return !list_empty(&vc->desc_issued);
}
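/*
 * Illustrative sketch (not part of this header): a typical
 * device_issue_pending implementation takes vc.lock, moves submitted work
 * to the issued list and kicks the hardware if it is idle. foo_start()
 * is a hypothetical helper (sketched further below alongside
 * vchan_next_desc()) that programs the controller; fc->cur is the
 * hypothetical in-flight descriptor.
 *
 *	static void foo_issue_pending(struct dma_chan *chan)
 *	{
 *		struct foo_chan *fc = to_foo_chan(chan);
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(&fc->vc.lock, flags);
 *		if (vchan_issue_pending(&fc->vc) && !fc->cur)
 *			foo_start(fc);
 *		spin_unlock_irqrestore(&fc->vc.lock, flags);
 *	}
 */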

/**
 * vchan_cookie_complete - report completion of a descriptor
 * @vd: virtual descriptor to update
 *
 * vc.lock must be held by caller
 */
static inline void vchan_cookie_complete(struct virt_dma_desc *vd)
{
	struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);
	dma_cookie_t cookie;

	cookie = vd->tx.cookie;
	dma_cookie_complete(&vd->tx);
	dev_vdbg(vc->chan.device->dev, "txd %p[%x]: marked complete\n",
		 vd, cookie);
	list_add_tail(&vd->node, &vc->desc_completed);

	tasklet_schedule(&vc->task);
}
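/*
 * Illustrative sketch (not part of this header): the interrupt handler
 * reports the finished descriptor with vchan_cookie_complete(), which
 * queues it on desc_completed and defers the client callback to the
 * tasklet. This assumes the in-flight descriptor was already removed
 * from desc_issued when the transfer was started (as foo_start() below
 * does). Names are hypothetical.
 *
 *	static irqreturn_t foo_irq(int irq, void *data)
 *	{
 *		struct foo_chan *fc = data;
 *
 *		spin_lock(&fc->vc.lock);
 *		if (fc->cur) {
 *			vchan_cookie_complete(&fc->cur->vd);
 *			fc->cur = NULL;
 *			foo_start(fc);
 *		}
 *		spin_unlock(&fc->vc.lock);
 *
 *		return IRQ_HANDLED;
 *	}
 */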

/**
 * vchan_vdesc_fini - Free or reuse a descriptor
 * @vd: virtual descriptor to free/reuse
 */
static inline void vchan_vdesc_fini(struct virt_dma_desc *vd)
{
	struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);

	if (dmaengine_desc_test_reuse(&vd->tx)) {
		unsigned long flags;

		spin_lock_irqsave(&vc->lock, flags);
		list_add(&vd->node, &vc->desc_allocated);
		spin_unlock_irqrestore(&vc->lock, flags);
	} else {
		vc->desc_free(vd);
	}
}

/**
 * vchan_cyclic_callback - report the completion of a period
 * @vd: virtual descriptor
 */
static inline void vchan_cyclic_callback(struct virt_dma_desc *vd)
{
	struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);

	vc->cyclic = vd;
	tasklet_schedule(&vc->task);
}
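/*
 * Illustrative sketch (not part of this header): for cyclic transfers the
 * interrupt handler reports each completed period instead of completing
 * the cookie; the tasklet then invokes the client's periodic callback.
 * fc->cur is the hypothetical in-flight descriptor from the sketches above.
 *
 *	spin_lock(&fc->vc.lock);
 *	if (fc->cur)
 *		vchan_cyclic_callback(&fc->cur->vd);
 *	spin_unlock(&fc->vc.lock);
 */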

/**
 * vchan_terminate_vdesc - Disable pending cyclic callback
 * @vd: virtual descriptor to be terminated
 *
 * vc.lock must be held by caller
 */
static inline void vchan_terminate_vdesc(struct virt_dma_desc *vd)
{
	struct virt_dma_chan *vc = to_virt_chan(vd->tx.chan);

	list_add_tail(&vd->node, &vc->desc_terminated);

	if (vc->cyclic == vd)
		vc->cyclic = NULL;
}

/**
 * vchan_next_desc - peek at the next descriptor to be processed
 * @vc: virtual channel to obtain descriptor from
 *
 * vc.lock must be held by caller
 */
static inline struct virt_dma_desc *vchan_next_desc(struct virt_dma_chan *vc)
{
	return list_first_entry_or_null(&vc->desc_issued,
					struct virt_dma_desc, node);
}
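/*
 * Illustrative sketch (not part of this header): the hypothetical
 * foo_start() helper used in the sketches above, called with vc.lock
 * held. It dequeues the next issued descriptor, removes it from the
 * issued list and programs the hardware (foo_hw_program() is likewise
 * hypothetical).
 *
 *	static void foo_start(struct foo_chan *fc)
 *	{
 *		struct virt_dma_desc *vd = vchan_next_desc(&fc->vc);
 *
 *		if (!vd)
 *			return;
 *
 *		list_del(&vd->node);
 *		fc->cur = container_of(vd, struct foo_desc, vd);
 *		foo_hw_program(fc, fc->cur);
 *	}
 */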

/**
 * vchan_get_all_descriptors - obtain all submitted and issued descriptors
 * @vc: virtual channel to get descriptors from
 * @head: list of descriptors found
 *
 * vc.lock must be held by caller
 *
 * Removes all submitted and issued descriptors from internal lists, and
 * provides a list of all descriptors found
 */
static inline void vchan_get_all_descriptors(struct virt_dma_chan *vc,
					     struct list_head *head)
{
	list_splice_tail_init(&vc->desc_allocated, head);
	list_splice_tail_init(&vc->desc_submitted, head);
	list_splice_tail_init(&vc->desc_issued, head);
	list_splice_tail_init(&vc->desc_completed, head);
	list_splice_tail_init(&vc->desc_terminated, head);
}
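/*
 * Illustrative sketch (not part of this header): a typical
 * device_terminate_all implementation stops the hardware, defers the
 * in-flight descriptor via vchan_terminate_vdesc(), collects everything
 * else and frees it outside the lock. foo_stop() and the other "foo_"
 * names are hypothetical.
 *
 *	static int foo_terminate_all(struct dma_chan *chan)
 *	{
 *		struct foo_chan *fc = to_foo_chan(chan);
 *		unsigned long flags;
 *		LIST_HEAD(head);
 *
 *		spin_lock_irqsave(&fc->vc.lock, flags);
 *		foo_stop(fc);
 *		if (fc->cur) {
 *			vchan_terminate_vdesc(&fc->cur->vd);
 *			fc->cur = NULL;
 *		}
 *		vchan_get_all_descriptors(&fc->vc, &head);
 *		spin_unlock_irqrestore(&fc->vc.lock, flags);
 *
 *		vchan_dma_desc_free_list(&fc->vc, &head);
 *
 *		return 0;
 *	}
 */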

static inline void vchan_free_chan_resources(struct virt_dma_chan *vc)
{
	struct virt_dma_desc *vd;
	unsigned long flags;
	LIST_HEAD(head);

	spin_lock_irqsave(&vc->lock, flags);
	vchan_get_all_descriptors(vc, &head);
	list_for_each_entry(vd, &head, node)
		dmaengine_desc_clear_reuse(&vd->tx);
	spin_unlock_irqrestore(&vc->lock, flags);

	vchan_dma_desc_free_list(vc, &head);
}

/**
 * vchan_synchronize() - synchronize callback execution to the current context
 * @vc: virtual channel to synchronize
 *
 * Makes sure that all scheduled or active callbacks have finished running. For
 * proper operation the caller has to ensure that no new callbacks are scheduled
 * after the invocation of this function started.
 * Free up the terminated cyclic descriptor to prevent memory leakage.
 */
static inline void vchan_synchronize(struct virt_dma_chan *vc)
{
	LIST_HEAD(head);
	unsigned long flags;

	tasklet_kill(&vc->task);

	spin_lock_irqsave(&vc->lock, flags);

	list_splice_tail_init(&vc->desc_terminated, &head);

	spin_unlock_irqrestore(&vc->lock, flags);

	vchan_dma_desc_free_list(vc, &head);
}
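/*
 * Illustrative sketch (not part of this header): these helpers are
 * usually wired up in the driver's probe routine, with vchan_init()
 * registering each channel on the dma_device. fdev->ddev and the "foo_"
 * callbacks are hypothetical.
 *
 *	fc->vc.desc_free = foo_desc_free;
 *	vchan_init(&fc->vc, &fdev->ddev);
 *
 *	fdev->ddev.device_issue_pending = foo_issue_pending;
 *	fdev->ddev.device_terminate_all = foo_terminate_all;
 *	fdev->ddev.device_synchronize = foo_synchronize;
 *	fdev->ddev.device_free_chan_resources = foo_free_chan_resources;
 *
 * where foo_synchronize() and foo_free_chan_resources() simply call
 * vchan_synchronize() and vchan_free_chan_resources() on &fc->vc, and
 * foo_desc_free() frees the embedding foo_desc.
 */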

#endif