// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2018 Rockchip Electronics Co., Ltd.
 */

#include <linux/debugfs.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/fs.h>
#include <linux/gpio.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
#include <linux/kthread.h>
#include <linux/list.h>
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/of_address.h>
#include <linux/of_device.h>
#include <linux/of_gpio.h>
#include <linux/of_pci.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <linux/reset.h>
#include <linux/resource.h>
#include <linux/seq_file.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <uapi/linux/rk-pcie-dma.h>

#include "rockchip-pcie-dma.h"
/* dma transfer */
/*
 * Write buffer format
 * 0	     4               8	       0xc	0x10	SZ_1M
 * ------------------------------------------------------
 * |0x12345678|local idx(0-7)|data size|reserved|data	|
 * ------------------------------------------------------
 *
 * Bytes 3-0: magic the receiver checks to see if a valid data package
 *	      has arrived
 * Bytes 7-4: index into the data rcv ack buffer
 * Bytes 11-8: actual data size
 *
 * Data rcv ack buffer format
 * 0		4B
 * --------------
 * |0xdeadbeef	|
 * --------------
 *
 * Data free ack buffer format
 * 0		4B
 * --------------
 * |0xcafebabe	|
 * --------------
 *
 *	RC		EP
 * -	---------	---------
 * |	|  1MB	|	|	|
 * |	|------	|	|	|
 * |	|	|	|	|
 * |	|	|	|	|
 * 8MB	|wr buf	|  ->	|rd buf	|
 * |	|	|	|	|
 * |	|	|	|	|
 * |	|	|	|	|
 * -	---------	---------
 * |	|  1MB	|	|	|
 * |	|------	|	|	|
 * |	|	|	|	|
 * |	|	|	|	|
 * 8MB	|rd buf	|  <-	|wr buf	|
 * |	|	|	|	|
 * |	|	|	|	|
 * |	|	|	|	|
 * -	---------	---------
 * |	|  4B	|	|	|
 * |	|------	|	|	|
 * |	|	|	|	|
 * 32B	|	|	|	|
 * |	|scan	|  <-	|data	|
 * |	|	|	|rcv	|
 * |	|	|	|ack	|
 * |	|	|	|send	|
 * -	---------	---------
 * |	|  4B	|	|	|
 * |	|------	|	|	|
 * |	|	|	|	|
 * 32B	|data	|  ->	|scan	|
 * |	|rcv	|	|	|
 * |	|ack	|	|	|
 * |	|send	|	|	|
 * |	|	|	|	|
 * -	---------	---------
 * |	|  4B	|	|	|
 * |	|------	|	|	|
 * |	|	|	|	|
 * 32B	|	|	|	|
 * |	|scan	|  <-	|data	|
 * |	|	|	|free	|
 * |	|	|	|ack	|
 * |	|	|	|send	|
 * -	---------	---------
 * |	|4B	|	|	|
 * |	|------	|	|	|
 * |	|	|	|	|
 * 32B	|data	|  ->	|scan	|
 * |	|free	|	|	|
 * |	|ack	|	|	|
 * |	|send	|	|	|
 * |	|	|	|	|
 * -	---------	---------
 */
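
/*
 * A minimal sketch of how the header words above are produced and
 * consumed. The offsets are the ones programmed by the
 * PCIE_DMA_SET_BUFFER_SIZE ioctl below (set_*_pos = buffer_size - n),
 * so in practice the header words live at the *end* of each buffer:
 *
 *	writel(PCIE_DMA_DATA_CHECK, virt + obj->set_data_check_pos);
 *	writel(local_idx, virt + obj->set_local_idx_pos);
 *	writel(buf_size, virt + obj->set_buf_size_pos);
 *
 * and the receiving side polls:
 *
 *	sdv = readl(scan_data_addr + obj->set_data_check_pos);
 *	idx = readl(scan_data_addr + obj->set_local_idx_pos);
 */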

#define NODE_SIZE		(sizeof(unsigned int))
#define PCIE_DMA_ACK_BLOCK_SIZE		(NODE_SIZE * 8)

#define PCIE_DMA_BUF_CNT		8

#define PCIE_DMA_DATA_CHECK		0x12345678
#define PCIE_DMA_DATA_ACK_CHECK		0xdeadbeef
#define PCIE_DMA_DATA_FREE_ACK_CHECK	0xcafebabe

#define PCIE_DMA_PARAM_SIZE		64
#define PCIE_DMA_CHN0			0x0

enum transfer_type {
	PCIE_DMA_DATA_SND,
	PCIE_DMA_DATA_RCV_ACK,
	PCIE_DMA_DATA_FREE_ACK,
	PCIE_DMA_READ_REMOTE,
};

static int enable_check_sum;

struct pcie_misc_dev {
	struct miscdevice dev;
	struct dma_trx_obj *obj;
};

static void *rk_pcie_map_kernel(phys_addr_t start, size_t len);
static void rk_pcie_unmap_kernel(void *vaddr);

static inline bool is_rc(struct dma_trx_obj *obj)
{
	return (obj->busno == 0);
}

static unsigned int rk_pcie_check_sum(unsigned int *src, int size)
{
	unsigned int result = 0;

	size /= sizeof(*src);

	while (size-- > 0)
		result ^= *src++;

	return result;
}
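
/*
 * The checksum is a plain 32-bit XOR over the buffer: for example,
 * rk_pcie_check_sum() over the two words {0x12345678, 0x12345678}
 * yields 0, since any word appearing an even number of times cancels
 * out. It is a lightweight debug aid (gated by enable_check_sum via
 * debugfs below), not a CRC.
 */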

static int rk_pcie_handle_dma_interrupt(struct dma_trx_obj *obj, u32 chn, enum dma_dir dir)
{
	struct dma_table *cur;

	cur = obj->cur;
	if (!cur) {
		pr_err("no pcie dma table\n");
		return 0;
	}

	obj->dma_free = true;
	obj->irq_num++;

	if (cur->dir == DMA_TO_BUS) {
		if (list_empty(&obj->tbl_list)) {
			if (obj->dma_free &&
			    obj->loop_count >= obj->loop_count_threshold)
				complete(&obj->done);
		}
	}

	return 0;
}

static void rk_pcie_prepare_dma(struct dma_trx_obj *obj,
			unsigned int idx, unsigned int bus_idx,
			unsigned int local_idx, size_t buf_size,
			enum transfer_type type, int chn)
{
	struct device *dev = obj->dev;
	phys_addr_t local, bus;
	void *virt;
	unsigned long flags;
	struct dma_table *table = NULL;
	unsigned int checksum;

	switch (type) {
	case PCIE_DMA_DATA_SND:
		table = obj->table[PCIE_DMA_DATA_SND_TABLE_OFFSET + local_idx];
		table->type = PCIE_DMA_DATA_SND;
		table->dir = DMA_TO_BUS;
		local = obj->local_mem_start + local_idx * obj->buffer_size;
		bus = obj->remote_mem_start + bus_idx * obj->buffer_size;
		virt = obj->local_mem_base + local_idx * obj->buffer_size;

		if (obj->addr_reverse) {
			if (is_rc(obj)) {
				local += obj->rd_buf_size;
				virt += obj->rd_buf_size;
				bus += obj->wr_buf_size;
			}
		} else {
			if (!is_rc(obj)) {
				local += obj->rd_buf_size;
				virt += obj->rd_buf_size;
				bus += obj->wr_buf_size;
			}
		}

		obj->begin = ktime_get();
		dma_sync_single_for_device(dev, local, buf_size, DMA_TO_DEVICE);
		obj->end = ktime_get();

		obj->cache_time_total += ktime_to_ns(ktime_sub(obj->end, obj->begin));

		writel(PCIE_DMA_DATA_CHECK, virt + obj->set_data_check_pos);
		writel(local_idx, virt + obj->set_local_idx_pos);
		writel(buf_size, virt + obj->set_buf_size_pos);

		if (enable_check_sum) {
			checksum = rk_pcie_check_sum(virt, SZ_1M - 0x10);
			writel(checksum, virt + obj->set_chk_sum_pos);
		}

		buf_size = obj->buffer_size;
		break;
	case PCIE_DMA_DATA_RCV_ACK:
		table = obj->table[PCIE_DMA_DATA_RCV_ACK_TABLE_OFFSET + idx];
		table->type = PCIE_DMA_DATA_RCV_ACK;
		table->dir = DMA_TO_BUS;
		local = obj->local_mem_start + obj->ack_base + idx * NODE_SIZE;
		virt = obj->local_mem_base + obj->ack_base + idx * NODE_SIZE;
		bus = obj->remote_mem_start + obj->ack_base + idx * NODE_SIZE;

		if (is_rc(obj)) {
			local += PCIE_DMA_ACK_BLOCK_SIZE;
			bus += PCIE_DMA_ACK_BLOCK_SIZE;
			virt += PCIE_DMA_ACK_BLOCK_SIZE;
		}
		writel(PCIE_DMA_DATA_ACK_CHECK, virt);
		break;
	case PCIE_DMA_DATA_FREE_ACK:
		table = obj->table[PCIE_DMA_DATA_FREE_ACK_TABLE_OFFSET + idx];
		table->type = PCIE_DMA_DATA_FREE_ACK;
		table->dir = DMA_TO_BUS;
		local = obj->local_mem_start + obj->ack_base + idx * NODE_SIZE;
		bus = obj->remote_mem_start + obj->ack_base + idx * NODE_SIZE;
		virt = obj->local_mem_base + obj->ack_base + idx * NODE_SIZE;
		buf_size = 4;

		if (is_rc(obj)) {
			local += 3 * PCIE_DMA_ACK_BLOCK_SIZE;
			bus += 3 * PCIE_DMA_ACK_BLOCK_SIZE;
			virt += 3 * PCIE_DMA_ACK_BLOCK_SIZE;
		} else {
			local += 2 * PCIE_DMA_ACK_BLOCK_SIZE;
			bus += 2 * PCIE_DMA_ACK_BLOCK_SIZE;
			virt += 2 * PCIE_DMA_ACK_BLOCK_SIZE;
		}
		writel(PCIE_DMA_DATA_FREE_ACK_CHECK, virt);
		break;
	case PCIE_DMA_READ_REMOTE:
		table = obj->table[PCIE_DMA_DATA_READ_REMOTE_TABLE_OFFSET + local_idx];
		table->type = PCIE_DMA_READ_REMOTE;
		table->dir = DMA_FROM_BUS;
		local = obj->local_mem_start + local_idx * obj->buffer_size;
		bus = obj->remote_mem_start + bus_idx * obj->buffer_size;
		if (!is_rc(obj)) {
			local += obj->rd_buf_size;
			bus += obj->wr_buf_size;
		}
		buf_size = obj->buffer_size;
		break;
	default:
		dev_err(dev, "type = %d not supported\n", type);
		return;
	}

	table->buf_size = buf_size;
	table->bus = bus;
	table->local = local;
	table->chn = chn;

	if (!obj->config_dma_func) {
		WARN_ON(1);
		return;
	}
	obj->config_dma_func(table);

	spin_lock_irqsave(&obj->tbl_list_lock, flags);
	list_add_tail(&table->tbl_node, &obj->tbl_list);
	spin_unlock_irqrestore(&obj->tbl_list_lock, flags);
}
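
/*
 * Queueing model: rk_pcie_prepare_dma() only fills in a preallocated
 * dma_table and appends it to obj->tbl_list; nothing is started here.
 * The transfer itself is kicked by queueing the trx work, e.g.:
 *
 *	rk_pcie_prepare_dma(obj, idx, 0, 0, 0x4,
 *			    PCIE_DMA_DATA_RCV_ACK, PCIE_DMA_DEFAULT_CHN);
 *	queue_work(obj->dma_trx_wq, &obj->dma_trx_work);
 *
 * which is exactly what the scan timer and the ioctl path below do.
 */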

static void rk_pcie_dma_trx_work(struct work_struct *work)
{
	unsigned long flags;
	struct dma_trx_obj *obj = container_of(work,
				struct dma_trx_obj, dma_trx_work);
	struct dma_table *table;

	while (!list_empty(&obj->tbl_list)) {
		table = list_first_entry(&obj->tbl_list, struct dma_table,
					 tbl_node);
		if (obj->dma_free) {
			obj->dma_free = false;
			spin_lock_irqsave(&obj->tbl_list_lock, flags);
			list_del_init(&table->tbl_node);
			spin_unlock_irqrestore(&obj->tbl_list_lock, flags);
			obj->cur = table;
			if (!obj->start_dma_func) {
				WARN_ON(1);
				return;
			}
			reinit_completion(&obj->done);
			obj->start_dma_func(obj, table);
		}
	}
}

static void rk_pcie_clear_ack(void *addr)
{
	writel(0x0, addr);
}

static enum hrtimer_restart rk_pcie_scan_timer(struct hrtimer *timer)
{
	unsigned int sdv;
	unsigned int idx;
	unsigned int sav;
	unsigned int suv;
	void *sda_base;
	void *scan_data_addr;
	void *scan_ack_addr;
	void *scan_user_addr;
	int i;
	bool need_ack = false;
	struct dma_trx_obj *obj = container_of(timer,
					struct dma_trx_obj, scan_timer);
	unsigned int check_sum, check_sum_tmp;

	if (!obj->remote_mem_start) {
		if (is_rc(obj))
			obj->remote_mem_start = readl(obj->region_base + 0x4);
		else
			obj->remote_mem_start = readl(obj->region_base);
		goto continue_scan;
	}

	for (i = 0; i < PCIE_DMA_BUF_CNT; i++) {
		sda_base = obj->local_mem_base + obj->buffer_size * i;

		if (obj->addr_reverse) {
			if (is_rc(obj))
				scan_data_addr = sda_base;
			else
				scan_data_addr = sda_base + obj->rd_buf_size;
		} else {
			if (is_rc(obj))
				scan_data_addr = sda_base + obj->rd_buf_size;
			else
				scan_data_addr = sda_base;
		}
		sdv = readl(scan_data_addr + obj->set_data_check_pos);
		idx = readl(scan_data_addr + obj->set_local_idx_pos);

		if (sdv == PCIE_DMA_DATA_CHECK) {
			need_ack = true;
			if (enable_check_sum) {
				check_sum = readl(scan_data_addr + obj->set_chk_sum_pos);
				check_sum_tmp = rk_pcie_check_sum(scan_data_addr, SZ_1M - 0x10);
				if (check_sum != check_sum_tmp) {
					pr_err("checksum[%d] failed, 0x%x, should be 0x%x\n",
					       idx, check_sum_tmp, check_sum);
					print_hex_dump(KERN_WARNING, "", DUMP_PREFIX_OFFSET,
						       32, 4, scan_data_addr, SZ_1M, false);
				}
				writel(0x0, scan_data_addr + obj->set_chk_sum_pos);
			}
			writel(0x0, scan_data_addr + obj->set_data_check_pos);

			set_bit(i, &obj->local_read_available);
			rk_pcie_prepare_dma(obj, idx, 0, 0, 0x4,
				PCIE_DMA_DATA_RCV_ACK, PCIE_DMA_DEFAULT_CHN);
		}
	}

	if (need_ack || !list_empty(&obj->tbl_list))
		queue_work(obj->dma_trx_wq, &obj->dma_trx_work);

	scan_ack_addr = obj->local_mem_base + obj->ack_base;
	scan_user_addr = obj->local_mem_base + obj->ack_base;

	if (is_rc(obj)) {
		scan_user_addr += PCIE_DMA_ACK_BLOCK_SIZE * 2;
	} else {
		scan_ack_addr += PCIE_DMA_ACK_BLOCK_SIZE;
		scan_user_addr += PCIE_DMA_ACK_BLOCK_SIZE * 3;
	}

	for (i = 0; i < PCIE_DMA_BUF_CNT; i++) {
		void *addr = scan_ack_addr + i * NODE_SIZE;

		sav = readl(addr);
		if (sav == PCIE_DMA_DATA_ACK_CHECK) {
			rk_pcie_clear_ack(addr);
			set_bit(i, &obj->local_write_available);
		}

		addr = scan_user_addr + i * NODE_SIZE;
		suv = readl(addr);
		if (suv == PCIE_DMA_DATA_FREE_ACK_CHECK) {
			rk_pcie_clear_ack(addr);
			set_bit(i, &obj->remote_write_available);
		}
	}

	if ((obj->local_write_available && obj->remote_write_available) ||
	    obj->local_read_available) {
		wake_up(&obj->event_queue);
	}

continue_scan:
	hrtimer_add_expires(&obj->scan_timer, ktime_set(0, 100 * 1000));

	return HRTIMER_RESTART;
}
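
/*
 * Poll loop summary (re-armed every 100us once both sides have
 * exchanged buffer addresses through region_base):
 *
 *   1. scan each rx data buffer for the 0x12345678 magic; on a hit,
 *      mark it readable and DMA a 0xdeadbeef "data rcv" ack back;
 *   2. scan our ack area for 0xdeadbeef, which re-opens the matching
 *      local write buffer;
 *   3. scan the "free" ack area for 0xcafebabe, which re-opens the
 *      matching remote write buffer;
 *   4. wake poll()ers when anything became readable or writable.
 */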

static int rk_pcie_misc_open(struct inode *inode, struct file *filp)
{
	struct miscdevice *miscdev = filp->private_data;
	struct pcie_misc_dev *pcie_misc_dev = container_of(miscdev,
						 struct pcie_misc_dev, dev);

	filp->private_data = pcie_misc_dev->obj;

	mutex_lock(&pcie_misc_dev->obj->count_mutex);
	if (pcie_misc_dev->obj->ref_count++)
		goto already_opened;

	pcie_misc_dev->obj->loop_count = 0;
	pcie_misc_dev->obj->local_read_available = 0x0;
	pcie_misc_dev->obj->local_write_available = 0xff;
	pcie_misc_dev->obj->remote_write_available = 0xff;
	pcie_misc_dev->obj->dma_free = true;

	pr_info("Open pcie misc device success\n");

already_opened:
	mutex_unlock(&pcie_misc_dev->obj->count_mutex);
	return 0;
}

static int rk_pcie_misc_release(struct inode *inode, struct file *filp)
{
	struct dma_trx_obj *obj = filp->private_data;

	mutex_lock(&obj->count_mutex);

	if (--obj->ref_count)
		goto still_opened;
	hrtimer_cancel(&obj->scan_timer);

	pr_info("Close pcie misc device\n");

still_opened:
	mutex_unlock(&obj->count_mutex);
	return 0;
}

static int rk_pcie_misc_mmap(struct file *filp,
			     struct vm_area_struct *vma)
{
	struct dma_trx_obj *obj = filp->private_data;
	size_t size = vma->vm_end - vma->vm_start;
	int err;

	/* Refuse mappings that would run past the reserved local buffer */
	if (size > obj->local_mem_size)
		return -EINVAL;

	err = remap_pfn_range(vma, vma->vm_start,
			    __phys_to_pfn(obj->local_mem_start),
			    size, vma->vm_page_prot);
	if (err)
		return -EAGAIN;

	return 0;
}

static void rk_pcie_send_addr_to_remote(struct dma_trx_obj *obj)
{
	struct dma_table *table;

	/* Temporarily reuse a send table to push the local buffer address to the remote side */
	table = obj->table[PCIE_DMA_DATA_SND_TABLE_OFFSET];
	table->type = PCIE_DMA_DATA_SND;
	table->dir = DMA_TO_BUS;
	table->buf_size = 0x4;
	if (is_rc(obj))
		table->local = obj->region_start;
	else
		table->local = obj->region_start + 0x4;
	table->bus = table->local;
	table->chn = PCIE_DMA_DEFAULT_CHN;
	obj->config_dma_func(table);
	obj->cur = table;
	obj->start_dma_func(obj, table);
}
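
/*
 * The shared "region" is a tiny control window: word 0 carries the
 * RC's local buffer address, word 1 the EP's. Each side writes its
 * own word locally (see PCIE_DMA_USER_SET_BUF_ADDR below), DMAs it
 * across with the helper above, and the peer's scan timer picks it
 * up as remote_mem_start before any data transfer begins.
 */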

static long rk_pcie_misc_ioctl(struct file *filp, unsigned int cmd,
					unsigned long arg)
{
	struct dma_trx_obj *obj = filp->private_data;
	struct device *dev = obj->dev;
	union pcie_dma_ioctl_param msg;
	union pcie_dma_ioctl_param msg_to_user;
	phys_addr_t addr;
	void __user *uarg = (void __user *)arg;
	int ret;
	int i;
	phys_addr_t addr_send_to_remote;
	enum transfer_type type;

	if (copy_from_user(&msg, uarg, sizeof(msg)) != 0) {
		dev_err(dev, "failed to copy argument into kernel space\n");
		return -EFAULT;
	}

	switch (cmd) {
	case PCIE_DMA_START:
		test_and_clear_bit(msg.in.l_widx, &obj->local_write_available);
		test_and_clear_bit(msg.in.r_widx, &obj->remote_write_available);
		type = PCIE_DMA_DATA_SND;
		obj->loop_count++;
		break;
	case PCIE_DMA_GET_LOCAL_READ_BUFFER_INDEX:
		msg_to_user.lra = obj->local_read_available;
		addr = obj->local_mem_start;
		if (is_rc(obj))
			addr += obj->rd_buf_size;
		/* Invalidate the cache, either automatically here or from userspace */
		for (i = 0; i < PCIE_DMA_BUF_CNT; i++) {
			if (test_bit(i, &obj->local_read_available))
				dma_sync_single_for_cpu(dev,
							addr + i * obj->buffer_size,
							obj->buffer_size,
							DMA_FROM_DEVICE);
		}

		ret = copy_to_user(uarg, &msg_to_user, sizeof(msg));
		if (ret) {
			dev_err(dev, "failed to get read buffer index\n");
			return -EFAULT;
		}
		break;
	case PCIE_DMA_FREE_LOCAL_READ_BUFFER_INDEX:
		test_and_clear_bit(msg.in.idx, &obj->local_read_available);
		type = PCIE_DMA_DATA_FREE_ACK;
		break;
	case PCIE_DMA_GET_LOCAL_REMOTE_WRITE_BUFFER_INDEX:
		msg_to_user.out.lwa = obj->local_write_available;
		msg_to_user.out.rwa = obj->remote_write_available;
		ret = copy_to_user(uarg, &msg_to_user, sizeof(msg));
		if (ret) {
			dev_err(dev, "failed to get write buffer index\n");
			return -EFAULT;
		}
		break;
	case PCIE_DMA_SYNC_BUFFER_FOR_CPU:
		addr = obj->local_mem_start + msg.in.idx * obj->buffer_size;
		if (is_rc(obj))
			addr += obj->rd_buf_size;
		dma_sync_single_for_cpu(dev, addr, obj->buffer_size,
					DMA_FROM_DEVICE);
		break;
	case PCIE_DMA_WAIT_TRANSFER_COMPLETE:
		ret = wait_for_completion_interruptible(&obj->done);
		if (WARN_ON(ret)) {
			pr_info("failed to wait for transfer completion\n");
			return ret;
		}

		/* Guard against a division by zero if no transfer was started */
		if (obj->loop_count)
			obj->cache_time_avarage = obj->cache_time_total /
						  obj->loop_count;

		pr_debug("cache_time: total = %lld, average = %lld, count = %d, size = 0x%x\n",
			 obj->cache_time_total, obj->cache_time_avarage,
			 obj->loop_count, obj->buffer_size);

		obj->cache_time_avarage = 0;
		obj->cache_time_total = 0;

		obj->loop_count = 0;
		break;
	case PCIE_DMA_SET_LOOP_COUNT:
		obj->loop_count_threshold = msg.count;
		pr_info("threshold = %d\n", obj->loop_count_threshold);
		break;
	case PCIE_DMA_GET_TOTAL_BUFFER_SIZE:
		msg_to_user.total_buffer_size = obj->local_mem_size;
		ret = copy_to_user(uarg, &msg_to_user, sizeof(msg));
		if (ret) {
			dev_err(dev, "failed to get total buffer size\n");
			return -EFAULT;
		}
		break;
	case PCIE_DMA_SET_BUFFER_SIZE:
		obj->buffer_size = msg.buffer_size;
		pr_debug("buffer_size = %d\n", obj->buffer_size);
		obj->rd_buf_size = obj->buffer_size * PCIE_DMA_BUF_CNT;
		obj->wr_buf_size = obj->buffer_size * PCIE_DMA_BUF_CNT;
		obj->ack_base = obj->rd_buf_size + obj->wr_buf_size;
		obj->set_data_check_pos = obj->buffer_size - 0x4;
		obj->set_local_idx_pos = obj->buffer_size - 0x8;
		obj->set_buf_size_pos = obj->buffer_size - 0xc;
		obj->set_chk_sum_pos = obj->buffer_size - 0x10;
		break;
	case PCIE_DMA_READ_FROM_REMOTE:
		pr_debug("read buffer from : %d to local : %d\n",
			 msg.in.r_widx, msg.in.l_widx);

		type = PCIE_DMA_READ_REMOTE;
		break;
	case PCIE_DMA_USER_SET_BUF_ADDR:
		/* If msg.local_addr is non-zero, use it as the local buffer;
		 * it must be physically contiguous. If it is zero, the local
		 * buffer comes from the DT reserved region. Either way the
		 * local buffer address must be sent to the remote side so it
		 * knows where to send data. This case must complete before
		 * any data is sent.
		 */
		if (msg.local_addr) {
			pr_debug("local_addr = %pa\n", &msg.local_addr);
			addr_send_to_remote = (phys_addr_t)msg.local_addr;
			obj->local_mem_start = (phys_addr_t)msg.local_addr;
			/* Unmap previous */
			rk_pcie_unmap_kernel(obj->local_mem_base);
			/* Remap userspace's buffer into the kernel */
			obj->local_mem_base = rk_pcie_map_kernel(obj->local_mem_start,
						obj->buffer_size * PCIE_DMA_BUF_CNT * 2 + SZ_4K);
			if (!obj->local_mem_base)
				return -EFAULT;
		} else {
			addr_send_to_remote = obj->local_mem_start;
		}
		if (is_rc(obj))
			writel(addr_send_to_remote, obj->region_base);
		else
			writel(addr_send_to_remote, obj->region_base + 0x4);
		rk_pcie_send_addr_to_remote(obj);
		hrtimer_start(&obj->scan_timer, ktime_set(1, 0),
			      HRTIMER_MODE_REL);
		break;
	case PCIE_DMA_GET_BUFFER_SIZE:
		msg_to_user.buffer_size = obj->buffer_size;
		ret = copy_to_user(uarg, &msg_to_user, sizeof(msg));
		if (ret) {
			dev_err(dev, "failed to get buffer size\n");
			return -EFAULT;
		}
		break;
	default:
		pr_info("%s, %d, cmd : %x not supported\n", __func__, __LINE__,
			cmd);
		return -EFAULT;
	}

	if (cmd == PCIE_DMA_START || cmd == PCIE_DMA_READ_FROM_REMOTE ||
		cmd == PCIE_DMA_FREE_LOCAL_READ_BUFFER_INDEX) {
		rk_pcie_prepare_dma(obj, msg.in.idx, msg.in.r_widx,
				    msg.in.l_widx, msg.in.size, type,
				    msg.in.chn);
		queue_work(obj->dma_trx_wq, &obj->dma_trx_work);
	}

	return 0;
}
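
/*
 * A minimal sketch of the expected userspace sequence against this
 * ioctl interface (hypothetical fd/variable names, error handling
 * omitted; commands and the param union come from
 * uapi/linux/rk-pcie-dma.h):
 *
 *	int fd = open("/dev/pcie-dev", O_RDWR);
 *	union pcie_dma_ioctl_param p = { .buffer_size = 0x100000 };
 *
 *	ioctl(fd, PCIE_DMA_SET_BUFFER_SIZE, &p);	// fix layout first
 *	p.local_addr = 0;				// use DT reserved mem
 *	ioctl(fd, PCIE_DMA_USER_SET_BUF_ADDR, &p);	// exchange addresses
 *
 *	poll(...);					// wait for POLLOUT
 *	ioctl(fd, PCIE_DMA_GET_LOCAL_REMOTE_WRITE_BUFFER_INDEX, &p);
 *	// fill a free write buffer via mmap(), then:
 *	ioctl(fd, PCIE_DMA_START, &p);
 */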

static unsigned int rk_pcie_misc_poll(struct file *filp,
						poll_table *wait)
{
	struct dma_trx_obj *obj = filp->private_data;
	u32 lwa, rwa, lra;
	u32 ret = 0;

	poll_wait(filp, &obj->event_queue, wait);

	lwa = obj->local_write_available;
	rwa = obj->remote_write_available;
	if (lwa && rwa)
		ret = POLLOUT;

	lra = obj->local_read_available;
	if (lra)
		ret |= POLLIN;

	return ret;
}

static const struct file_operations rk_pcie_misc_fops = {
	.open		= rk_pcie_misc_open,
	.release	= rk_pcie_misc_release,
	.mmap		= rk_pcie_misc_mmap,
	.unlocked_ioctl	= rk_pcie_misc_ioctl,
	.poll		= rk_pcie_misc_poll,
};

static void rk_pcie_delete_misc(struct dma_trx_obj *obj)
{
	misc_deregister(&obj->pcie_dev->dev);
}

static int rk_pcie_add_misc(struct dma_trx_obj *obj)
{
	int ret;
	struct pcie_misc_dev *pcie_dev;

	pcie_dev = devm_kzalloc(obj->dev, sizeof(*pcie_dev), GFP_KERNEL);
	if (!pcie_dev)
		return -ENOMEM;

	pcie_dev->dev.minor = MISC_DYNAMIC_MINOR;
	pcie_dev->dev.name = "pcie-dev";
	pcie_dev->dev.fops = &rk_pcie_misc_fops;
	pcie_dev->dev.parent = NULL;

	ret = misc_register(&pcie_dev->dev);
	if (ret) {
		pr_err("pcie: failed to register misc device.\n");
		return ret;
	}

	pcie_dev->obj = obj;
	obj->pcie_dev = pcie_dev;

	pr_info("register misc device pcie-dev\n");

	return 0;
}

static void *rk_pcie_map_kernel(phys_addr_t start, size_t len)
{
	int i;
	void *vaddr;
	pgprot_t pgprot;
	phys_addr_t phys;
	int npages = PAGE_ALIGN(len) / PAGE_SIZE;
	struct page **p = vmalloc(sizeof(struct page *) * npages);

	if (!p)
		return NULL;

	pgprot = pgprot_noncached(PAGE_KERNEL);

	phys = start;
	for (i = 0; i < npages; i++) {
		p[i] = phys_to_page(phys);
		phys += PAGE_SIZE;
	}

	vaddr = vmap(p, npages, VM_MAP, pgprot);
	vfree(p);

	return vaddr;
}
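
/*
 * The reserved buffer is mapped uncached on purpose: both the CPU and
 * the peer's DMA engine poll magic words in it (see the scan timer),
 * so a cached mapping would need explicit maintenance on every scan.
 * vmap() over the page array is used rather than ioremap() because the
 * region is ordinary reserved system RAM, not device memory.
 */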

static void rk_pcie_unmap_kernel(void *vaddr)
{
	vunmap(vaddr);
}

static void rk_pcie_dma_table_free(struct dma_trx_obj *obj, int num)
{
	int i;
	struct dma_table *table;

	if (num > PCIE_DMA_TABLE_NUM)
		num = PCIE_DMA_TABLE_NUM;

	for (i = 0; i < num; i++) {
		table = obj->table[i];
		dma_free_coherent(obj->dev, PCIE_DMA_PARAM_SIZE,
			table->descs, table->phys_descs);
		kfree(table);
	}
}

static int rk_pcie_dma_table_alloc(struct dma_trx_obj *obj)
{
	int i;
	struct dma_table *table;

	for (i = 0; i < PCIE_DMA_TABLE_NUM; i++) {
		table = kzalloc(sizeof(*table), GFP_KERNEL);
		if (!table)
			goto free_table;

		table->descs = dma_alloc_coherent(obj->dev, PCIE_DMA_PARAM_SIZE,
				&table->phys_descs, GFP_KERNEL | __GFP_ZERO);
		if (!table->descs) {
			kfree(table);
			goto free_table;
		}

		table->chn = PCIE_DMA_DEFAULT_CHN;
		INIT_LIST_HEAD(&table->tbl_node);
		obj->table[i] = table;
	}

	return 0;

free_table:
	rk_pcie_dma_table_free(obj, i);
	dev_err(obj->dev, "Failed to alloc dma table\n");

	return -ENOMEM;
}

#ifdef CONFIG_DEBUG_FS
static int rk_pcie_debugfs_trx_show(struct seq_file *s, void *v)
{
	struct dma_trx_obj *dma_obj = s->private;
	bool list = list_empty(&dma_obj->tbl_list);

	seq_printf(s, "version = %x,", dma_obj->version);
	seq_printf(s, "last:%s,",
			dma_obj->cur ? (dma_obj->cur->dir == DMA_FROM_BUS ? "read" : "write") : "no trx");
	seq_printf(s, "irq_num = %ld, loop_count = %d,",
			dma_obj->irq_num, dma_obj->loop_count);
	seq_printf(s, "loop_threshold = %d,",
			dma_obj->loop_count_threshold);
	seq_printf(s, "lwa = %lx, rwa = %lx, lra = %lx,",
			dma_obj->local_write_available,
			dma_obj->remote_write_available,
			dma_obj->local_read_available);
	seq_printf(s, "list : (%s), dma chn : (%s)\n",
			list ? "empty" : "not empty",
			dma_obj->dma_free ? "free" : "busy");

	return 0;
}

static int rk_pcie_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, rk_pcie_debugfs_trx_show, inode->i_private);
}

static ssize_t rk_pcie_debugfs_write(struct file *file, const char __user *user_buf,
				 size_t count, loff_t *ppos)
{
	int ret;

	ret = kstrtoint_from_user(user_buf, count, 0, &enable_check_sum);
	if (ret)
		return ret;

	return count;
}

static const struct file_operations rk_pcie_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = rk_pcie_debugfs_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
	.write = rk_pcie_debugfs_write,
};
#endif

struct dma_trx_obj *rk_pcie_dma_obj_probe(struct device *dev)
{
	int ret;
	int busno;
	struct device_node *np = dev->of_node;
	struct device_node *mem;
	struct resource reg;
	struct dma_trx_obj *obj;
	int reverse;

	obj = devm_kzalloc(dev, sizeof(struct dma_trx_obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	obj->dev = dev;

	ret = of_property_read_u32(np, "busno", &busno);
	if (ret < 0) {
		dev_err(dev, "missing \"busno\" property\n");
		return ERR_PTR(ret);
	}

	obj->busno = busno;

	ret = of_property_read_u32(np, "reverse", &reverse);
	if (ret < 0)
		obj->addr_reverse = 0;
	else
		obj->addr_reverse = reverse;

	mem = of_parse_phandle(np, "memory-region", 0);
	if (!mem) {
		dev_err(dev, "missing \"memory-region\" property\n");
		return ERR_PTR(-ENODEV);
	}

	ret = of_address_to_resource(mem, 0, &reg);
	if (ret < 0) {
		dev_err(dev, "missing \"reg\" property\n");
		return ERR_PTR(-ENODEV);
	}

	obj->local_mem_start = reg.start;
	obj->local_mem_size = resource_size(&reg);
	obj->local_mem_base = rk_pcie_map_kernel(obj->local_mem_start,
						 obj->local_mem_size);
	if (!obj->local_mem_base)
		return ERR_PTR(-ENOMEM);

	mem = of_parse_phandle(np, "memory-region1", 0);
	if (!mem) {
		dev_err(dev, "missing \"memory-region1\" property\n");
		ret = -ENODEV;
		goto unmap_local_mem_region;
	}

	ret = of_address_to_resource(mem, 0, &reg);
	if (ret < 0) {
		dev_err(dev, "missing \"reg\" property\n");
		ret = -ENODEV;
		goto unmap_local_mem_region;
	}

	obj->region_start = reg.start;
	obj->region_size = resource_size(&reg);
	obj->region_base = rk_pcie_map_kernel(obj->region_start,
					      obj->region_size);
	if (!obj->region_base) {
		dev_err(dev, "mapping region_base error\n");
		ret = -ENOMEM;
		goto unmap_local_mem_region;
	}
	if (!is_rc(obj))
		writel(0x0, obj->region_base);
	else
		writel(0x0, obj->region_base + 0x4);

	ret = rk_pcie_dma_table_alloc(obj);
	if (ret) {
		dev_err(dev, "rk_pcie_dma_table_alloc error\n");
		ret = -ENOMEM;
		goto unmap_region;
	}

	obj->dma_trx_wq = create_singlethread_workqueue("dma_trx_wq");
	if (!obj->dma_trx_wq) {
		ret = -ENOMEM;
		goto free_dma_table;
	}
	INIT_WORK(&obj->dma_trx_work, rk_pcie_dma_trx_work);

	INIT_LIST_HEAD(&obj->tbl_list);
	spin_lock_init(&obj->tbl_list_lock);

	init_waitqueue_head(&obj->event_queue);

	/* obj is heap-allocated (devm_kzalloc), so plain hrtimer_init()
	 * is the right call, not the on-stack variant.
	 */
	hrtimer_init(&obj->scan_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	obj->scan_timer.function = rk_pcie_scan_timer;
	obj->irq_num = 0;
	obj->loop_count_threshold = 0;
	obj->ref_count = 0;
	obj->version = 0x4;
	init_completion(&obj->done);
	obj->cb = rk_pcie_handle_dma_interrupt;

	mutex_init(&obj->count_mutex);
	rk_pcie_add_misc(obj);

#ifdef CONFIG_DEBUG_FS
	obj->pcie_root = debugfs_create_dir("pcie", NULL);
	if (!obj->pcie_root) {
		ret = -EINVAL;
		goto free_dma_table;
	}

	debugfs_create_file("pcie_trx", 0644, obj->pcie_root, obj,
			&rk_pcie_debugfs_fops);
#endif

	return obj;

	/* Keep obj valid on the error paths: the original code assigned
	 * ERR_PTR() to obj before jumping here and then dereferenced it.
	 */
free_dma_table:
	rk_pcie_dma_table_free(obj, PCIE_DMA_TABLE_NUM);
unmap_region:
	rk_pcie_unmap_kernel(obj->region_base);
unmap_local_mem_region:
	rk_pcie_unmap_kernel(obj->local_mem_base);

	return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(rk_pcie_dma_obj_probe);

void rk_pcie_dma_obj_remove(struct dma_trx_obj *obj)
{
	hrtimer_cancel(&obj->scan_timer);
	rk_pcie_delete_misc(obj);
	rk_pcie_unmap_kernel(obj->local_mem_base);
	rk_pcie_unmap_kernel(obj->region_base);
	rk_pcie_dma_table_free(obj, PCIE_DMA_TABLE_NUM);
	destroy_workqueue(obj->dma_trx_wq);

#ifdef CONFIG_DEBUG_FS
	debugfs_remove_recursive(obj->pcie_root);
#endif
}
EXPORT_SYMBOL_GPL(rk_pcie_dma_obj_remove);