xref: /OK3568_Linux_fs/kernel/include/linux/iio/buffer-dma.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0-only */
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Copyright 2013-2015 Analog Devices Inc.
4*4882a593Smuzhiyun  *  Author: Lars-Peter Clausen <lars@metafoo.de>
5*4882a593Smuzhiyun  */
6*4882a593Smuzhiyun 
7*4882a593Smuzhiyun #ifndef __INDUSTRIALIO_DMA_BUFFER_H__
8*4882a593Smuzhiyun #define __INDUSTRIALIO_DMA_BUFFER_H__
9*4882a593Smuzhiyun 
10*4882a593Smuzhiyun #include <linux/list.h>
11*4882a593Smuzhiyun #include <linux/kref.h>
12*4882a593Smuzhiyun #include <linux/spinlock.h>
13*4882a593Smuzhiyun #include <linux/mutex.h>
14*4882a593Smuzhiyun #include <linux/iio/buffer_impl.h>
15*4882a593Smuzhiyun 
16*4882a593Smuzhiyun struct iio_dma_buffer_queue;
17*4882a593Smuzhiyun struct iio_dma_buffer_ops;
18*4882a593Smuzhiyun struct device;
19*4882a593Smuzhiyun 
/**
 * struct iio_buffer_block - Descriptor for one DMA data block
 * @size: Total size of the block in bytes
 * @bytes_used: Number of bytes in the block that contain valid data
 *
 * NOTE(review): fixed-width u32 fields suggest this may cross a
 * kernel/user or driver ABI boundary — confirm against the users of
 * this header before changing the layout.
 */
struct iio_buffer_block {
	u32 size;
	u32 bytes_used;
};
24*4882a593Smuzhiyun 
/**
 * enum iio_block_state - State of a struct iio_dma_buffer_block
 * @IIO_BLOCK_STATE_DEQUEUED: Block is not queued
 * @IIO_BLOCK_STATE_QUEUED: Block is on the incoming queue
 * @IIO_BLOCK_STATE_ACTIVE: Block is currently being processed by the DMA
 * @IIO_BLOCK_STATE_DONE: Block is on the outgoing queue
 * @IIO_BLOCK_STATE_DEAD: Block has been marked as to be freed
 */
enum iio_block_state {
	IIO_BLOCK_STATE_DEQUEUED,
	IIO_BLOCK_STATE_QUEUED,
	IIO_BLOCK_STATE_ACTIVE,
	IIO_BLOCK_STATE_DONE,
	IIO_BLOCK_STATE_DEAD,
};
40*4882a593Smuzhiyun 
/**
 * struct iio_dma_buffer_block - IIO buffer block
 * @head: List head
 * @size: Total size of the block in bytes
 * @bytes_used: Number of bytes that contain valid data
 * @vaddr: Virtual address of the block's memory
 * @phys_addr: Physical address of the block's memory
 * @queue: Parent DMA buffer queue
 * @kref: kref used to manage the lifetime of block
 * @state: Current state of the block
 */
struct iio_dma_buffer_block {
	/* May only be accessed by the owner of the block */
	struct list_head head;
	size_t bytes_used;

	/*
	 * Set during allocation, constant thereafter. May be accessed read-only
	 * by anybody holding a reference to the block.
	 */
	void *vaddr;
	dma_addr_t phys_addr;
	size_t size;
	struct iio_dma_buffer_queue *queue;

	/* Must not be accessed outside the core. */
	struct kref kref;
	/*
	 * Must not be accessed outside the core. Access needs to hold
	 * queue->list_lock if the block is not owned by the core.
	 */
	enum iio_block_state state;
};
74*4882a593Smuzhiyun 
/**
 * struct iio_dma_buffer_queue_fileio - FileIO state for the DMA buffer
 * @blocks: Buffer blocks used for fileio
 * @active_block: Block being used in read()
 * @pos: Read offset in the active block
 * @block_size: Size of each block
 */
struct iio_dma_buffer_queue_fileio {
	/*
	 * Two blocks — presumably so one can be filled by DMA while the
	 * other is drained by read() (double buffering). TODO(review):
	 * confirm against buffer-dma.c, which is not visible here.
	 */
	struct iio_dma_buffer_block *blocks[2];
	struct iio_dma_buffer_block *active_block;
	size_t pos;
	size_t block_size;
};
88*4882a593Smuzhiyun 
/**
 * struct iio_dma_buffer_queue - DMA buffer base structure
 * @buffer: IIO buffer base structure
 * @dev: Parent device
 * @ops: DMA buffer callbacks
 * @lock: Protects the incoming list, active and the fields in the fileio
 *   substruct
 * @list_lock: Protects lists that contain blocks which can be modified in
 *   atomic context as well as blocks on those lists. This is the outgoing queue
 *   list and typically also a list of active blocks in the part that handles
 *   the DMA controller
 * @incoming: List of buffers on the incoming queue
 * @outgoing: List of buffers on the outgoing queue
 * @active: Whether the buffer is currently active
 * @fileio: FileIO state
 */
struct iio_dma_buffer_queue {
	struct iio_buffer buffer;
	struct device *dev;
	const struct iio_dma_buffer_ops *ops;

	/* Sleeping lock: see @lock above for the fields it covers. */
	struct mutex lock;
	/* Spinlock: @list_lock may be taken in atomic (e.g. IRQ) context. */
	spinlock_t list_lock;
	struct list_head incoming;
	struct list_head outgoing;

	bool active;

	struct iio_dma_buffer_queue_fileio fileio;
};
119*4882a593Smuzhiyun 
/**
 * struct iio_dma_buffer_ops - DMA buffer callback operations
 * @submit: Called when a block is submitted to the DMA controller. Returns
 *   0 on success or a negative error code. (Error semantics inferred from
 *   the int return type — confirm against buffer-dma.c.)
 * @abort: Should abort all pending transfers
 *
 * Implemented by the driver that owns the actual DMA controller; the
 * buffer core invokes these through queue->ops.
 */
struct iio_dma_buffer_ops {
	int (*submit)(struct iio_dma_buffer_queue *queue,
		struct iio_dma_buffer_block *block);
	void (*abort)(struct iio_dma_buffer_queue *queue);
};
130*4882a593Smuzhiyun 
/*
 * Block completion helpers — NOTE(review): names suggest these are called
 * from the driver's DMA completion/teardown paths to hand blocks back to
 * the core; confirm in buffer-dma.c.
 */
void iio_dma_buffer_block_done(struct iio_dma_buffer_block *block);
void iio_dma_buffer_block_list_abort(struct iio_dma_buffer_queue *queue,
	struct list_head *list);

/*
 * Buffer operations matching the struct iio_buffer_access_funcs callback
 * signatures (see linux/iio/buffer_impl.h); drivers plug these into their
 * access_funcs table.
 */
int iio_dma_buffer_enable(struct iio_buffer *buffer,
	struct iio_dev *indio_dev);
int iio_dma_buffer_disable(struct iio_buffer *buffer,
	struct iio_dev *indio_dev);
int iio_dma_buffer_read(struct iio_buffer *buffer, size_t n,
	char __user *user_buffer);
size_t iio_dma_buffer_data_available(struct iio_buffer *buffer);
int iio_dma_buffer_set_bytes_per_datum(struct iio_buffer *buffer, size_t bpd);
int iio_dma_buffer_set_length(struct iio_buffer *buffer, unsigned int length);
int iio_dma_buffer_request_update(struct iio_buffer *buffer);

/* Queue setup and teardown. */
int iio_dma_buffer_init(struct iio_dma_buffer_queue *queue,
	struct device *dma_dev, const struct iio_dma_buffer_ops *ops);
void iio_dma_buffer_exit(struct iio_dma_buffer_queue *queue);
void iio_dma_buffer_release(struct iio_dma_buffer_queue *queue);

#endif /* __INDUSTRIALIO_DMA_BUFFER_H__ */
152