// SPDX-License-Identifier: (GPL-2.0+ OR MIT)
/*
 * Copyright (c) 2022 Rockchip Electronics Co., Ltd.
 */

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/init.h>
#include <linux/ktime.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/err.h>
#include <linux/completion.h>
#include <linux/dma-mapping.h>
#include <linux/mutex.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/kthread.h>

#include "pcie-designware.h"
#include "pcie-dw-dmatest.h"
#include "../rockchip-pcie-dma.h"

static int test_size = 0x20;
module_param_named(size, test_size, int, 0644);
MODULE_PARM_DESC(size, "each packet size in bytes");

static unsigned int cycles_count = 1;
module_param(cycles_count, uint, 0644);
MODULE_PARM_DESC(cycles_count, "how many transfer cycles to do (default 1)");

static unsigned int chn_en = 1;
module_param(chn_en, uint, 0644);
MODULE_PARM_DESC(chn_en, "Each bit enables one DMA channel, up to 2 channels (default: enable channel 0)");

static unsigned int rw_test = 3;
module_param(rw_test, uint, 0644);
MODULE_PARM_DESC(rw_test, "Read/Write test, 1-read 2-write 3-both (default 3)");

static unsigned int bus_addr = 0x3c000000;
module_param(bus_addr, uint, 0644);
MODULE_PARM_DESC(bus_addr, "Dmatest chn0 bus address (remote side); chn1 is offset by the packet size (default 0x3c000000)");

static unsigned int local_addr = 0x3c000000;
module_param(local_addr, uint, 0644);
MODULE_PARM_DESC(local_addr, "Dmatest chn0 local address; chn1 is offset by the packet size (default 0x3c000000)");

static unsigned int test_dev;
module_param(test_dev, uint, 0644);
MODULE_PARM_DESC(test_dev, "Choose dma_obj device (default 0)");

static bool is_rc = true;
module_param_named(is_rc, is_rc, bool, 0644);
MODULE_PARM_DESC(is_rc, "Test port is RC (default true)");

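/*
 * Typical usage (a sketch; the exact sysfs path depends on how this file is
 * built, KBUILD_MODNAME is normally "pcie_dw_dmatest"):
 *
 *	echo show > /sys/module/pcie_dw_dmatest/parameters/dmatest
 *	echo run  > /sys/module/pcie_dw_dmatest/parameters/dmatest
 *
 * "show" lists the registered DMA objects, "run" starts a transfer test
 * using the size/cycles_count/chn_en/rw_test/bus_addr/local_addr values
 * configured above at that moment.
 */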
#define PCIE_DW_MISC_DMATEST_DEV_MAX 5

#define PCIE_DMA_CHANEL_MAX_NUM		2

struct pcie_dw_dmatest_dev {
	struct dma_trx_obj *obj;

	bool irq_en;
	struct completion rd_done[PCIE_DMA_CHANEL_MAX_NUM];
	struct completion wr_done[PCIE_DMA_CHANEL_MAX_NUM];

	struct mutex rd_lock[PCIE_DMA_CHANEL_MAX_NUM];	/* Corresponding to each read DMA channel */
	struct mutex wr_lock[PCIE_DMA_CHANEL_MAX_NUM];	/* Corresponding to each write DMA channel */
};

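/*
 * Static registry of DMA objects handed out by pcie_dw_dmatest_register().
 * Entries are filled in registration order; the "test_dev" module parameter
 * selects which entry the test runs against.
 */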
static struct pcie_dw_dmatest_dev s_dmatest_dev[PCIE_DW_MISC_DMATEST_DEV_MAX];
static int cur_dmatest_dev;

static void pcie_dw_dmatest_show(void)
{
	int i;

	for (i = 0; i < PCIE_DW_MISC_DMATEST_DEV_MAX; i++) {
		if (s_dmatest_dev[i].obj)
			dev_info(s_dmatest_dev[i].obj->dev, " test_dev index %d\n", i);
		else
			break;
	}

	if (test_dev < PCIE_DW_MISC_DMATEST_DEV_MAX && s_dmatest_dev[test_dev].obj)
		dev_info(s_dmatest_dev[test_dev].obj->dev, " is current test_dev\n");
	else
		pr_err("test_dev index %u is not registered\n", test_dev);
}

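/*
 * Busy-poll fallback used when IRQ completion is disabled: spin on the
 * controller's DMA status callback until the transfer on this channel and
 * direction reports done.
 */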
static int rk_pcie_dma_wait_for_finised(struct dma_trx_obj *obj, struct dma_table *table)
{
	int ret;

	do {
		ret = obj->get_dma_status(obj, table->chn, table->dir);
	} while (!ret);

	return ret;
}

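/*
 * Issue one DMA transfer that pulls "size" bytes from the remote bus address
 * into local memory on channel "chn", then block until it completes (IRQ
 * completion or busy-poll, depending on how the device was registered).
 */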
static int rk_pcie_ep_dma_frombus(struct pcie_dw_dmatest_dev *dmatest_dev, u32 chn,
				  u32 local_paddr, u32 bus_paddr, u32 size)
{
	struct dma_table *table;
	struct dma_trx_obj *obj = dmatest_dev->obj;
	int ret;

	if (chn >= PCIE_DMA_CHANEL_MAX_NUM)
		return -1;

	table = kzalloc(sizeof(struct dma_table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	mutex_lock(&dmatest_dev->rd_lock[chn]);
	if (dmatest_dev->irq_en)
		reinit_completion(&dmatest_dev->rd_done[chn]);

	table->buf_size = size;
	table->bus = bus_paddr;
	table->local = local_paddr;
	table->chn = chn;
	table->dir = DMA_FROM_BUS;

	obj->config_dma_func(table);
	obj->start_dma_func(obj, table);

	if (dmatest_dev->irq_en) {
		ret = wait_for_completion_interruptible_timeout(&dmatest_dev->rd_done[chn], HZ);
		if (ret < 0)
			dev_err(obj->dev, "%s interrupted\n", __func__);
		else if (ret == 0)
			dev_err(obj->dev, "%s timed out\n", __func__);
	} else {
		ret = rk_pcie_dma_wait_for_finised(obj, table);
	}
	mutex_unlock(&dmatest_dev->rd_lock[chn]);

	kfree(table);

	return ret;
}

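/*
 * Mirror image of rk_pcie_ep_dma_frombus(): push "size" bytes from local
 * memory to the remote bus address on the selected write channel and wait
 * for completion.
 */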
static int rk_pcie_ep_dma_tobus(struct pcie_dw_dmatest_dev *dmatest_dev, u32 chn,
				u32 bus_paddr, u32 local_paddr, u32 size)
{
	struct dma_table *table;
	struct dma_trx_obj *obj = dmatest_dev->obj;
	int ret;

	if (chn >= PCIE_DMA_CHANEL_MAX_NUM)
		return -1;

	table = kzalloc(sizeof(struct dma_table), GFP_KERNEL);
	if (!table)
		return -ENOMEM;

	mutex_lock(&dmatest_dev->wr_lock[chn]);
	if (dmatest_dev->irq_en)
		reinit_completion(&dmatest_dev->wr_done[chn]);

	table->buf_size = size;
	table->bus = bus_paddr;
	table->local = local_paddr;
	table->chn = chn;
	table->dir = DMA_TO_BUS;

	obj->config_dma_func(table);
	obj->start_dma_func(obj, table);

	if (dmatest_dev->irq_en) {
		ret = wait_for_completion_interruptible_timeout(&dmatest_dev->wr_done[chn], HZ);
		if (ret < 0)
			dev_err(obj->dev, "%s interrupted\n", __func__);
		else if (ret == 0)
			dev_err(obj->dev, "%s timed out\n", __func__);
	} else {
		ret = rk_pcie_dma_wait_for_finised(obj, table);
	}
	mutex_unlock(&dmatest_dev->wr_lock[chn]);

	kfree(table);

	return ret;
}

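/*
 * RC-side wrappers: the RC entry points reuse the EP helpers with the
 * opposite transfer direction and with the bus/local address arguments
 * swapped.
 */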
static int rk_pcie_rc_dma_frombus(struct pcie_dw_dmatest_dev *dmatest_dev, u32 chn,
				  u32 local_paddr, u32 bus_paddr, u32 size)
{
	return rk_pcie_ep_dma_tobus(dmatest_dev, chn, local_paddr, bus_paddr, size);
}

static int rk_pcie_rc_dma_tobus(struct pcie_dw_dmatest_dev *dmatest_dev, u32 chn,
				u32 bus_paddr, u32 local_paddr, u32 size)
{
	return rk_pcie_ep_dma_frombus(dmatest_dev, chn, bus_paddr, local_paddr, size);
}

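/*
 * Completion callback installed as obj->cb; the controller driver is
 * expected to call it from its DMA interrupt handler so that the transfer
 * waiting on this channel and direction is woken up.
 */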
static int rk_pcie_dma_interrupt_handler_call_back(struct dma_trx_obj *obj, u32 chn, enum dma_dir dir)
{
	struct pcie_dw_dmatest_dev *dmatest_dev = (struct pcie_dw_dmatest_dev *)obj->priv;

	if (chn >= PCIE_DMA_CHANEL_MAX_NUM)
		return -1;

	if (dir == DMA_FROM_BUS)
		complete(&dmatest_dev->rd_done[chn]);
	else
		complete(&dmatest_dev->wr_done[chn]);

	return 0;
}

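/*
 * Called by the DWC controller driver to register a DMA object for testing;
 * up to PCIE_DW_MISC_DMATEST_DEV_MAX ports can register. An illustrative
 * call site (a sketch, not part of this file; the field name is made up):
 *
 *	rockchip->dma_obj = pcie_dw_dmatest_register(dev, true);
 *	if (IS_ERR(rockchip->dma_obj))
 *		return PTR_ERR(rockchip->dma_obj);
 */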
struct dma_trx_obj *pcie_dw_dmatest_register(struct device *dev, bool irq_en)
{
	struct dma_trx_obj *obj;
	struct pcie_dw_dmatest_dev *dmatest_dev = &s_dmatest_dev[cur_dmatest_dev];
	int i;

	if (cur_dmatest_dev >= PCIE_DW_MISC_DMATEST_DEV_MAX)
		return ERR_PTR(-ENOSPC);

	obj = devm_kzalloc(dev, sizeof(struct dma_trx_obj), GFP_KERNEL);
	if (!obj)
		return ERR_PTR(-ENOMEM);

	obj->dev = dev;
	obj->priv = dmatest_dev;
	obj->cb = rk_pcie_dma_interrupt_handler_call_back;

	/* Save for dmatest */
	dmatest_dev->obj = obj;
	for (i = 0; i < PCIE_DMA_CHANEL_MAX_NUM; i++) {
		init_completion(&dmatest_dev->rd_done[i]);
		init_completion(&dmatest_dev->wr_done[i]);
		mutex_init(&dmatest_dev->rd_lock[i]);
		mutex_init(&dmatest_dev->wr_lock[i]);
	}

	/* Record whether IRQ-based completion is used for this port */
	dmatest_dev->irq_en = irq_en;
	cur_dmatest_dev++;

	return obj;
}

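/*
 * Core measurement loop: run "loop" iterations of read and/or write DMA on
 * one channel, time the whole run with ktime and report the aggregate
 * throughput in KB/s.
 */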
static int dma_test(struct pcie_dw_dmatest_dev *dmatest_dev, u32 chn,
		    u32 bus_paddr, u32 local_paddr, u32 size, u32 loop, u8 rd_en, u8 wr_en)
{
	ktime_t start_time;
	ktime_t end_time;
	ktime_t cost_time;
	u32 i;
	long long total_byte;
	long long us = 0;
	struct dma_trx_obj *obj = dmatest_dev->obj;

	/*
	 * Clean the cache to ensure memory consistency: data the CPU has
	 * written may still sit in the cache when the transfer is started,
	 * which can cause I/O coherency problems (e.g. for I/O commands).
	 */
	if (rd_en)
		dma_sync_single_for_device(obj->dev, local_paddr, size, DMA_TO_DEVICE);

	start_time = ktime_get();
	for (i = 0; i < loop; i++) {
		if (rd_en) {
			if (is_rc)
				rk_pcie_rc_dma_frombus(dmatest_dev, chn, local_paddr, bus_paddr, size);
			else
				rk_pcie_ep_dma_frombus(dmatest_dev, chn, local_paddr, bus_paddr, size);
			dma_sync_single_for_cpu(obj->dev, local_paddr, size, DMA_FROM_DEVICE);
		}

		if (wr_en) {
			dma_sync_single_for_device(obj->dev, local_paddr, size, DMA_TO_DEVICE);
			if (is_rc)
				rk_pcie_rc_dma_tobus(dmatest_dev, chn, bus_paddr, local_paddr, size);
			else
				rk_pcie_ep_dma_tobus(dmatest_dev, chn, bus_paddr, local_paddr, size);
		}
	}
	end_time = ktime_get();
	cost_time = ktime_sub(end_time, start_time);
	us = ktime_to_us(cost_time);

	total_byte = (wr_en + rd_en) * size * loop; /* 1 rd, 1 wr */
	total_byte = total_byte * (1000000 / 1024) / us;
	pr_err("pcie dma %s/%s test (%d+%d)*%d*%d cost %lldus speed:%lldKB/S\n",
	       wr_en ? "wr" : "", rd_en ? "rd" : "", wr_en, rd_en, size, loop, us, total_byte);

	return 0;
}

static int dma_test_ch0(void *p)
{
	dma_test(&s_dmatest_dev[test_dev], 0, bus_addr, local_addr, test_size,
		 cycles_count, rw_test & 0x1, (rw_test & 0x2) >> 1);

	return 0;
}

static int dma_test_ch1(void *p)
{
	/* When both channels run, test in a different area than ch0 */
	if (chn_en == 3)
		dma_test(&s_dmatest_dev[test_dev], 1, bus_addr + test_size, local_addr + test_size, test_size,
			 cycles_count, rw_test & 0x1, (rw_test & 0x2) >> 1);
	else
		dma_test(&s_dmatest_dev[test_dev], 1, bus_addr, local_addr, test_size,
			 cycles_count, rw_test & 0x1, (rw_test & 0x2) >> 1);

	return 0;
}

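/*
 * chn_en is a bit mask: bit 0 selects channel 0, bit 1 selects channel 1.
 * With both bits set the two channels are exercised concurrently from
 * separate kthreads; otherwise the selected channel runs in the caller's
 * context.
 */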
static int dma_run(void)
{
	if (chn_en == 3) {
		kthread_run(dma_test_ch0, NULL, "dma_test_ch0");
		kthread_run(dma_test_ch1, NULL, "dma_test_ch1");
	} else if (chn_en == 2) {
		dma_test_ch1(NULL);
	} else {
		dma_test_ch0(NULL);
	}

	return 0;
}

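/*
 * "set" handler for the "dmatest" module parameter: writing "run" starts a
 * test with the current parameter values, writing "show" lists the
 * registered devices.
 */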
static int pcie_dw_dmatest(const char *val, const struct kernel_param *kp)
{
	char tmp[8];

	if (test_dev >= PCIE_DW_MISC_DMATEST_DEV_MAX || !s_dmatest_dev[test_dev].obj) {
		pr_err("dmatest dev %u does not exist\n", test_dev);
		return -1;
	}

	strncpy(tmp, val, 8);
	if (!strncmp(tmp, "run", 3)) {
		dma_run();
	} else if (!strncmp(tmp, "show", 4)) {
		pcie_dw_dmatest_show();
	} else {
		pr_info("input error\n");
	}

	return 0;
}

static const struct kernel_param_ops pcie_dw_dmatest_ops = {
	.set = pcie_dw_dmatest,
	.get = param_get_uint,
};

module_param_cb(dmatest, &pcie_dw_dmatest_ops, &pcie_dw_dmatest, 0644);
MODULE_PARM_DESC(dmatest, "test rockchip pcie dma module");

MODULE_AUTHOR("Jon Lin");
MODULE_LICENSE("GPL");