// SPDX-License-Identifier: GPL-2.0-only
/*
 * Virtual DMA channel support for DMAengine
 *
 * Copyright (C) 2012 Russell King
 */
#include <linux/device.h>
#include <linux/dmaengine.h>
#include <linux/module.h>
#include <linux/spinlock.h>

#include "virt-dma.h"

static struct virt_dma_desc *to_virt_desc(struct dma_async_tx_descriptor *tx)
{
	return container_of(tx, struct virt_dma_desc, tx);
}

/**
 * vchan_tx_submit - assign a cookie and queue a prepared descriptor
 * @tx: the transfer
 *
 * Moves the descriptor from the allocated list to the submitted list
 * under the channel lock. Installed as tx->tx_submit by vchan_tx_prep().
 */
dma_cookie_t vchan_tx_submit(struct dma_async_tx_descriptor *tx)
{
	struct virt_dma_chan *vc = to_virt_chan(tx->chan);
	struct virt_dma_desc *vd = to_virt_desc(tx);
	unsigned long flags;
	dma_cookie_t cookie;

	spin_lock_irqsave(&vc->lock, flags);
	cookie = dma_cookie_assign(tx);

	list_move_tail(&vd->node, &vc->desc_submitted);
	spin_unlock_irqrestore(&vc->lock, flags);

	dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n",
		vc, vd, cookie);

	return cookie;
}
EXPORT_SYMBOL_GPL(vchan_tx_submit);

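/*
 * Usage sketch (hypothetical driver code, not part of this file): a client
 * never calls vchan_tx_submit() directly. A vchan-based driver returns its
 * descriptor through vchan_tx_prep() (virt-dma.h), which installs
 * vchan_tx_submit as tx->tx_submit, so dmaengine_submit() lands here. The
 * foo_* names below are illustrative assumptions:
 *
 *	struct foo_desc {
 *		struct virt_dma_desc vd;
 *		// hardware-specific fields
 *	};
 *
 *	static struct dma_async_tx_descriptor *
 *	foo_prep_dma_memcpy(struct dma_chan *chan, dma_addr_t dst,
 *			    dma_addr_t src, size_t len, unsigned long flags)
 *	{
 *		struct foo_desc *d = kzalloc(sizeof(*d), GFP_NOWAIT);
 *
 *		if (!d)
 *			return NULL;
 *		// program d with dst/src/len here
 *		return vchan_tx_prep(to_virt_chan(chan), &d->vd, flags);
 *	}
 */
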
/**
 * vchan_tx_desc_free - free a reusable descriptor
 * @tx: the transfer
 *
 * This function frees a previously allocated reusable descriptor. The only
 * other way to release it is to clear the DMA_CTRL_REUSE flag and submit
 * the transfer one last time.
 *
 * Returns 0 upon success
 */
int vchan_tx_desc_free(struct dma_async_tx_descriptor *tx)
{
	struct virt_dma_chan *vc = to_virt_chan(tx->chan);
	struct virt_dma_desc *vd = to_virt_desc(tx);
	unsigned long flags;

	spin_lock_irqsave(&vc->lock, flags);
	list_del(&vd->node);
	spin_unlock_irqrestore(&vc->lock, flags);

	dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: freeing\n",
		vc, vd, vd->tx.cookie);
	vc->desc_free(vd);
	return 0;
}
EXPORT_SYMBOL_GPL(vchan_tx_desc_free);

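/*
 * Usage sketch (hypothetical client code): vchan_tx_prep() installs this
 * function as tx->desc_free, so a client releases a reusable descriptor
 * through the generic dmaengine_desc_free() helper. The error handling
 * below is illustrative:
 *
 *	if (dmaengine_desc_set_reuse(tx))
 *		return -EPERM;		// channel lacks descriptor_reuse
 *	cookie = dmaengine_submit(tx);
 *	// ... the same tx may be resubmitted after it completes ...
 *	dmaengine_desc_free(tx);	// dispatches to vchan_tx_desc_free()
 */
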
/*
 * vchan_find_desc - find the descriptor with @cookie on the issued list,
 * or return NULL if there is no match. The caller must hold vc->lock.
 */
struct virt_dma_desc *vchan_find_desc(struct virt_dma_chan *vc,
	dma_cookie_t cookie)
{
	struct virt_dma_desc *vd;

	list_for_each_entry(vd, &vc->desc_issued, node)
		if (vd->tx.cookie == cookie)
			return vd;

	return NULL;
}
EXPORT_SYMBOL_GPL(vchan_find_desc);

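/*
 * Usage sketch (hypothetical driver code): a typical caller is a driver's
 * device_tx_status implementation, which looks up a still-issued
 * descriptor under vc->lock to report its residue. foo_desc_residue() is
 * an illustrative helper, not a real API:
 *
 *	static enum dma_status foo_tx_status(struct dma_chan *chan,
 *					     dma_cookie_t cookie,
 *					     struct dma_tx_state *state)
 *	{
 *		struct virt_dma_chan *vc = to_virt_chan(chan);
 *		struct virt_dma_desc *vd;
 *		enum dma_status ret;
 *		unsigned long flags;
 *
 *		ret = dma_cookie_status(chan, cookie, state);
 *		if (ret == DMA_COMPLETE)
 *			return ret;
 *
 *		spin_lock_irqsave(&vc->lock, flags);
 *		vd = vchan_find_desc(vc, cookie);
 *		if (vd)
 *			dma_set_residue(state, foo_desc_residue(vd));
 *		spin_unlock_irqrestore(&vc->lock, flags);
 *
 *		return ret;
 *	}
 */
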
/*
 * This tasklet handles the completion of a DMA descriptor by
 * calling its callback and freeing it.
 */
static void vchan_complete(struct tasklet_struct *t)
{
	struct virt_dma_chan *vc = from_tasklet(vc, t, task);
	struct virt_dma_desc *vd, *_vd;
	struct dmaengine_desc_callback cb;
	LIST_HEAD(head);

	spin_lock_irq(&vc->lock);
	list_splice_tail_init(&vc->desc_completed, &head);
	vd = vc->cyclic;
	if (vd) {
		vc->cyclic = NULL;
		dmaengine_desc_get_callback(&vd->tx, &cb);
	} else {
		memset(&cb, 0, sizeof(cb));
	}
	spin_unlock_irq(&vc->lock);

	/*
	 * If there was no cyclic descriptor, cb was zeroed above and this
	 * invoke is a no-op: the result pointer computed from the NULL vd
	 * is never dereferenced.
	 */
	dmaengine_desc_callback_invoke(&cb, &vd->tx_result);

	list_for_each_entry_safe(vd, _vd, &head, node) {
		dmaengine_desc_get_callback(&vd->tx, &cb);

		list_del(&vd->node);
		dmaengine_desc_callback_invoke(&cb, &vd->tx_result);
		vchan_vdesc_fini(vd);
	}
}

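/*
 * Usage sketch (hypothetical driver code): a driver feeds this tasklet
 * from its interrupt handler via vchan_cookie_complete() (virt-dma.h),
 * which marks the cookie complete, moves the descriptor to desc_completed
 * and schedules vc->task. c->cur_vd is an illustrative "descriptor
 * currently on the hardware" pointer:
 *
 *	static irqreturn_t foo_irq(int irq, void *data)
 *	{
 *		struct foo_chan *c = data;
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(&c->vc.lock, flags);
 *		if (c->cur_vd) {
 *			vchan_cookie_complete(c->cur_vd);	// lock must be held
 *			c->cur_vd = NULL;
 *		}
 *		spin_unlock_irqrestore(&c->vc.lock, flags);
 *		return IRQ_HANDLED;
 *	}
 *
 * Cyclic transfers use vchan_cyclic_callback() instead, which sets
 * vc->cyclic and schedules the tasklet without completing the cookie.
 */
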
/*
 * vchan_dma_desc_free_list - detach and free every descriptor on @head.
 * Callers must not hold vc->lock, since vchan_vdesc_fini() may take it.
 */
void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head)
{
	struct virt_dma_desc *vd, *_vd;

	list_for_each_entry_safe(vd, _vd, head, node) {
		list_del(&vd->node);
		vchan_vdesc_fini(vd);
	}
}
EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list);

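/*
 * Usage sketch (hypothetical driver code): the usual caller is a driver's
 * device_terminate_all implementation, which gathers every descriptor
 * with vchan_get_all_descriptors() (virt-dma.h) under the lock and frees
 * them once the lock is dropped:
 *
 *	static int foo_terminate_all(struct dma_chan *chan)
 *	{
 *		struct virt_dma_chan *vc = to_virt_chan(chan);
 *		unsigned long flags;
 *		LIST_HEAD(head);
 *
 *		spin_lock_irqsave(&vc->lock, flags);
 *		// stop the hardware channel here
 *		vchan_get_all_descriptors(vc, &head);
 *		spin_unlock_irqrestore(&vc->lock, flags);
 *
 *		vchan_dma_desc_free_list(vc, &head);
 *
 *		return 0;
 *	}
 */
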
/*
 * vchan_init - initialise a virtual DMA channel and register it with
 * @dmadev. Drivers are expected to set vc->desc_free themselves before
 * any descriptor can be freed.
 */
void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev)
{
	dma_cookie_init(&vc->chan);

	spin_lock_init(&vc->lock);
	INIT_LIST_HEAD(&vc->desc_allocated);
	INIT_LIST_HEAD(&vc->desc_submitted);
	INIT_LIST_HEAD(&vc->desc_issued);
	INIT_LIST_HEAD(&vc->desc_completed);
	INIT_LIST_HEAD(&vc->desc_terminated);

	tasklet_setup(&vc->task, vchan_complete);

	vc->chan.device = dmadev;
	list_add_tail(&vc->chan.device_node, &dmadev->channels);
}
EXPORT_SYMBOL_GPL(vchan_init);

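/*
 * Usage sketch (hypothetical driver code): at probe time the driver embeds
 * a struct virt_dma_chan in its per-channel state, supplies a desc_free
 * callback, and registers the channel. The foo_* names (including
 * foo_desc from the earlier sketch) are illustrative:
 *
 *	struct foo_chan {
 *		struct virt_dma_chan vc;
 *		// hardware-specific fields
 *	};
 *
 *	static void foo_desc_free(struct virt_dma_desc *vd)
 *	{
 *		kfree(container_of(vd, struct foo_desc, vd));
 *	}
 *
 *	// in probe, for each channel:
 *	c->vc.desc_free = foo_desc_free;
 *	vchan_init(&c->vc, &fdev->ddev);	// fdev->ddev: the struct dma_device
 */
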
MODULE_AUTHOR("Russell King");
MODULE_LICENSE("GPL");