xref: /OK3568_Linux_fs/kernel/drivers/pci/endpoint/functions/pci-epf-test.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
/*
 * Test driver to test endpoint functionality
 *
 * Copyright (C) 2017 Texas Instruments
 * Author: Kishon Vijay Abraham I <kishon@ti.com>
 */
8*4882a593Smuzhiyun 
9*4882a593Smuzhiyun #include <linux/crc32.h>
10*4882a593Smuzhiyun #include <linux/delay.h>
11*4882a593Smuzhiyun #include <linux/dmaengine.h>
12*4882a593Smuzhiyun #include <linux/io.h>
13*4882a593Smuzhiyun #include <linux/module.h>
14*4882a593Smuzhiyun #include <linux/slab.h>
15*4882a593Smuzhiyun #include <linux/pci_ids.h>
16*4882a593Smuzhiyun #include <linux/random.h>
17*4882a593Smuzhiyun 
18*4882a593Smuzhiyun #include <linux/pci-epc.h>
19*4882a593Smuzhiyun #include <linux/pci-epf.h>
20*4882a593Smuzhiyun #include <linux/pci_regs.h>
21*4882a593Smuzhiyun 
22*4882a593Smuzhiyun #define IRQ_TYPE_LEGACY			0
23*4882a593Smuzhiyun #define IRQ_TYPE_MSI			1
24*4882a593Smuzhiyun #define IRQ_TYPE_MSIX			2
25*4882a593Smuzhiyun 
26*4882a593Smuzhiyun #define COMMAND_RAISE_LEGACY_IRQ	BIT(0)
27*4882a593Smuzhiyun #define COMMAND_RAISE_MSI_IRQ		BIT(1)
28*4882a593Smuzhiyun #define COMMAND_RAISE_MSIX_IRQ		BIT(2)
29*4882a593Smuzhiyun #define COMMAND_READ			BIT(3)
30*4882a593Smuzhiyun #define COMMAND_WRITE			BIT(4)
31*4882a593Smuzhiyun #define COMMAND_COPY			BIT(5)
32*4882a593Smuzhiyun 
33*4882a593Smuzhiyun #define STATUS_READ_SUCCESS		BIT(0)
34*4882a593Smuzhiyun #define STATUS_READ_FAIL		BIT(1)
35*4882a593Smuzhiyun #define STATUS_WRITE_SUCCESS		BIT(2)
36*4882a593Smuzhiyun #define STATUS_WRITE_FAIL		BIT(3)
37*4882a593Smuzhiyun #define STATUS_COPY_SUCCESS		BIT(4)
38*4882a593Smuzhiyun #define STATUS_COPY_FAIL		BIT(5)
39*4882a593Smuzhiyun #define STATUS_IRQ_RAISED		BIT(6)
40*4882a593Smuzhiyun #define STATUS_SRC_ADDR_INVALID		BIT(7)
41*4882a593Smuzhiyun #define STATUS_DST_ADDR_INVALID		BIT(8)
42*4882a593Smuzhiyun 
43*4882a593Smuzhiyun #define FLAG_USE_DMA			BIT(0)
44*4882a593Smuzhiyun 
45*4882a593Smuzhiyun #define TIMER_RESOLUTION		1
46*4882a593Smuzhiyun 
47*4882a593Smuzhiyun static struct workqueue_struct *kpcitest_workqueue;
48*4882a593Smuzhiyun 
/* Per-function state for the endpoint test function driver. */
struct pci_epf_test {
	void			*reg[PCI_STD_NUM_BARS];	/* BAR backing memory (kernel va) */
	struct pci_epf		*epf;			/* the EPF device this state belongs to */
	enum pci_barno		test_reg_bar;		/* BAR whose memory holds struct pci_epf_test_reg */
	size_t			msix_table_offset;	/* offset of the MSI-X table within its BAR */
	struct delayed_work	cmd_handler;		/* polls reg->command (see pci_epf_test_cmd_handler) */
	struct dma_chan		*dma_chan;		/* memcpy DMA channel; only valid if dma_supported */
	struct completion	transfer_complete;	/* signalled by the DMA completion callback */
	bool			dma_supported;		/* a DMA memcpy channel was acquired at bind time */
	const struct pci_epc_features *epc_features;	/* capabilities of the parent controller */
};
60*4882a593Smuzhiyun 
/*
 * Layout of the test register region exposed through the test BAR.
 * Presumably shared with the host-side test driver, so it is __packed to
 * keep the field offsets identical on both sides — confirm against the
 * host driver before changing.
 */
struct pci_epf_test_reg {
	u32	magic;
	u32	command;	/* COMMAND_* request bits; consumed (cleared) by the handler */
	u32	status;		/* STATUS_* result bits reported back */
	u64	src_addr;	/* PCI (host-side) source buffer address */
	u64	dst_addr;	/* PCI (host-side) destination buffer address */
	u32	size;		/* transfer length in bytes */
	u32	checksum;	/* crc32_le(~0, ...) of the payload */
	u32	irq_type;	/* IRQ_TYPE_LEGACY / IRQ_TYPE_MSI / IRQ_TYPE_MSIX */
	u32	irq_number;	/* MSI/MSI-X vector to raise */
	u32	flags;		/* FLAG_USE_DMA */
} __packed;
73*4882a593Smuzhiyun 
/* Default configuration-space header written for the test function. */
static struct pci_epf_header test_header = {
	.vendorid	= PCI_ANY_ID,
	.deviceid	= PCI_ANY_ID,
	.baseclass_code = PCI_CLASS_OTHERS,
	.interrupt_pin	= PCI_INTERRUPT_INTA,
};

/* Default allocation size, in bytes, for each of the six standard BARs. */
static size_t bar_size[] = { 512, 512, 1024, 16384, 131072, 1048576 };
82*4882a593Smuzhiyun 
pci_epf_test_dma_callback(void * param)83*4882a593Smuzhiyun static void pci_epf_test_dma_callback(void *param)
84*4882a593Smuzhiyun {
85*4882a593Smuzhiyun 	struct pci_epf_test *epf_test = param;
86*4882a593Smuzhiyun 
87*4882a593Smuzhiyun 	complete(&epf_test->transfer_complete);
88*4882a593Smuzhiyun }
89*4882a593Smuzhiyun 
90*4882a593Smuzhiyun /**
91*4882a593Smuzhiyun  * pci_epf_test_data_transfer() - Function that uses dmaengine API to transfer
92*4882a593Smuzhiyun  *				  data between PCIe EP and remote PCIe RC
93*4882a593Smuzhiyun  * @epf_test: the EPF test device that performs the data transfer operation
94*4882a593Smuzhiyun  * @dma_dst: The destination address of the data transfer. It can be a physical
95*4882a593Smuzhiyun  *	     address given by pci_epc_mem_alloc_addr or DMA mapping APIs.
96*4882a593Smuzhiyun  * @dma_src: The source address of the data transfer. It can be a physical
97*4882a593Smuzhiyun  *	     address given by pci_epc_mem_alloc_addr or DMA mapping APIs.
98*4882a593Smuzhiyun  * @len: The size of the data transfer
99*4882a593Smuzhiyun  *
100*4882a593Smuzhiyun  * Function that uses dmaengine API to transfer data between PCIe EP and remote
101*4882a593Smuzhiyun  * PCIe RC. The source and destination address can be a physical address given
102*4882a593Smuzhiyun  * by pci_epc_mem_alloc_addr or the one obtained using DMA mapping APIs.
103*4882a593Smuzhiyun  *
104*4882a593Smuzhiyun  * The function returns '0' on success and negative value on failure.
105*4882a593Smuzhiyun  */
pci_epf_test_data_transfer(struct pci_epf_test * epf_test,dma_addr_t dma_dst,dma_addr_t dma_src,size_t len)106*4882a593Smuzhiyun static int pci_epf_test_data_transfer(struct pci_epf_test *epf_test,
107*4882a593Smuzhiyun 				      dma_addr_t dma_dst, dma_addr_t dma_src,
108*4882a593Smuzhiyun 				      size_t len)
109*4882a593Smuzhiyun {
110*4882a593Smuzhiyun 	enum dma_ctrl_flags flags = DMA_CTRL_ACK | DMA_PREP_INTERRUPT;
111*4882a593Smuzhiyun 	struct dma_chan *chan = epf_test->dma_chan;
112*4882a593Smuzhiyun 	struct pci_epf *epf = epf_test->epf;
113*4882a593Smuzhiyun 	struct dma_async_tx_descriptor *tx;
114*4882a593Smuzhiyun 	struct device *dev = &epf->dev;
115*4882a593Smuzhiyun 	dma_cookie_t cookie;
116*4882a593Smuzhiyun 	int ret;
117*4882a593Smuzhiyun 
118*4882a593Smuzhiyun 	if (IS_ERR_OR_NULL(chan)) {
119*4882a593Smuzhiyun 		dev_err(dev, "Invalid DMA memcpy channel\n");
120*4882a593Smuzhiyun 		return -EINVAL;
121*4882a593Smuzhiyun 	}
122*4882a593Smuzhiyun 
123*4882a593Smuzhiyun 	tx = dmaengine_prep_dma_memcpy(chan, dma_dst, dma_src, len, flags);
124*4882a593Smuzhiyun 	if (!tx) {
125*4882a593Smuzhiyun 		dev_err(dev, "Failed to prepare DMA memcpy\n");
126*4882a593Smuzhiyun 		return -EIO;
127*4882a593Smuzhiyun 	}
128*4882a593Smuzhiyun 
129*4882a593Smuzhiyun 	tx->callback = pci_epf_test_dma_callback;
130*4882a593Smuzhiyun 	tx->callback_param = epf_test;
131*4882a593Smuzhiyun 	cookie = tx->tx_submit(tx);
132*4882a593Smuzhiyun 	reinit_completion(&epf_test->transfer_complete);
133*4882a593Smuzhiyun 
134*4882a593Smuzhiyun 	ret = dma_submit_error(cookie);
135*4882a593Smuzhiyun 	if (ret) {
136*4882a593Smuzhiyun 		dev_err(dev, "Failed to do DMA tx_submit %d\n", cookie);
137*4882a593Smuzhiyun 		return -EIO;
138*4882a593Smuzhiyun 	}
139*4882a593Smuzhiyun 
140*4882a593Smuzhiyun 	dma_async_issue_pending(chan);
141*4882a593Smuzhiyun 	ret = wait_for_completion_interruptible(&epf_test->transfer_complete);
142*4882a593Smuzhiyun 	if (ret < 0) {
143*4882a593Smuzhiyun 		dmaengine_terminate_sync(chan);
144*4882a593Smuzhiyun 		dev_err(dev, "DMA wait_for_completion_timeout\n");
145*4882a593Smuzhiyun 		return -ETIMEDOUT;
146*4882a593Smuzhiyun 	}
147*4882a593Smuzhiyun 
148*4882a593Smuzhiyun 	return 0;
149*4882a593Smuzhiyun }
150*4882a593Smuzhiyun 
151*4882a593Smuzhiyun /**
152*4882a593Smuzhiyun  * pci_epf_test_init_dma_chan() - Function to initialize EPF test DMA channel
153*4882a593Smuzhiyun  * @epf_test: the EPF test device that performs data transfer operation
154*4882a593Smuzhiyun  *
155*4882a593Smuzhiyun  * Function to initialize EPF test DMA channel.
156*4882a593Smuzhiyun  */
pci_epf_test_init_dma_chan(struct pci_epf_test * epf_test)157*4882a593Smuzhiyun static int pci_epf_test_init_dma_chan(struct pci_epf_test *epf_test)
158*4882a593Smuzhiyun {
159*4882a593Smuzhiyun 	struct pci_epf *epf = epf_test->epf;
160*4882a593Smuzhiyun 	struct device *dev = &epf->dev;
161*4882a593Smuzhiyun 	struct dma_chan *dma_chan;
162*4882a593Smuzhiyun 	dma_cap_mask_t mask;
163*4882a593Smuzhiyun 	int ret;
164*4882a593Smuzhiyun 
165*4882a593Smuzhiyun 	dma_cap_zero(mask);
166*4882a593Smuzhiyun 	dma_cap_set(DMA_MEMCPY, mask);
167*4882a593Smuzhiyun 
168*4882a593Smuzhiyun 	dma_chan = dma_request_chan_by_mask(&mask);
169*4882a593Smuzhiyun 	if (IS_ERR(dma_chan)) {
170*4882a593Smuzhiyun 		ret = PTR_ERR(dma_chan);
171*4882a593Smuzhiyun 		if (ret != -EPROBE_DEFER)
172*4882a593Smuzhiyun 			dev_err(dev, "Failed to get DMA channel\n");
173*4882a593Smuzhiyun 		return ret;
174*4882a593Smuzhiyun 	}
175*4882a593Smuzhiyun 	init_completion(&epf_test->transfer_complete);
176*4882a593Smuzhiyun 
177*4882a593Smuzhiyun 	epf_test->dma_chan = dma_chan;
178*4882a593Smuzhiyun 
179*4882a593Smuzhiyun 	return 0;
180*4882a593Smuzhiyun }
181*4882a593Smuzhiyun 
182*4882a593Smuzhiyun /**
183*4882a593Smuzhiyun  * pci_epf_test_clean_dma_chan() - Function to cleanup EPF test DMA channel
184*4882a593Smuzhiyun  * @epf_test: the EPF test device that performs data transfer operation
185*4882a593Smuzhiyun  *
186*4882a593Smuzhiyun  * Helper to cleanup EPF test DMA channel.
187*4882a593Smuzhiyun  */
pci_epf_test_clean_dma_chan(struct pci_epf_test * epf_test)188*4882a593Smuzhiyun static void pci_epf_test_clean_dma_chan(struct pci_epf_test *epf_test)
189*4882a593Smuzhiyun {
190*4882a593Smuzhiyun 	if (!epf_test->dma_supported)
191*4882a593Smuzhiyun 		return;
192*4882a593Smuzhiyun 
193*4882a593Smuzhiyun 	dma_release_channel(epf_test->dma_chan);
194*4882a593Smuzhiyun 	epf_test->dma_chan = NULL;
195*4882a593Smuzhiyun }
196*4882a593Smuzhiyun 
/* Log the size, elapsed time and throughput (KB/s) of a completed transfer. */
static void pci_epf_test_print_rate(const char *ops, u64 size,
				    struct timespec64 *start,
				    struct timespec64 *end, bool dma)
{
	struct timespec64 delta;
	u64 rate, ns;

	delta = timespec64_sub(*end, *start);

	/* Express both the size (scaled into 'rate') and the time in ns */
	ns = timespec64_to_ns(&delta);
	rate = size * NSEC_PER_SEC;

	/* Halve numerator and denominator until ns fits in 32 bits (do_div) */
	while (ns > UINT_MAX) {
		rate >>= 1;
		ns >>= 1;
	}

	/* Nothing sensible to report for an immeasurably short transfer */
	if (!ns)
		return;

	do_div(rate, (uint32_t)ns);

	pr_info("\n%s => Size: %llu bytes\t DMA: %s\t Time: %llu.%09u seconds\t"
		"Rate: %llu KB/s\n", ops, size, dma ? "YES" : "NO",
		(u64)delta.tv_sec, (u32)delta.tv_nsec, rate / 1024);
}
226*4882a593Smuzhiyun 
/*
 * Handle COMMAND_COPY: map both reg->src_addr and reg->dst_addr from the
 * host into outbound windows and copy reg->size bytes from source to
 * destination, either through the DMA engine or via a bounce buffer with
 * memcpy_fromio()/memcpy_toio().  The elapsed time is logged.
 * Returns 0 on success, negative errno on failure; reg->status records
 * which address failed.
 */
static int pci_epf_test_copy(struct pci_epf_test *epf_test)
{
	int ret;
	bool use_dma;
	void __iomem *src_addr;
	void __iomem *dst_addr;
	phys_addr_t src_phys_addr;
	phys_addr_t dst_phys_addr;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];

	/* Outbound window through which the host source buffer is read */
	src_addr = pci_epc_mem_alloc_addr(epc, &src_phys_addr, reg->size);
	if (!src_addr) {
		dev_err(dev, "Failed to allocate source address\n");
		reg->status = STATUS_SRC_ADDR_INVALID;
		ret = -ENOMEM;
		goto err;
	}

	ret = pci_epc_map_addr(epc, epf->func_no, src_phys_addr, reg->src_addr,
			       reg->size);
	if (ret) {
		dev_err(dev, "Failed to map source address\n");
		reg->status = STATUS_SRC_ADDR_INVALID;
		goto err_src_addr;
	}

	/* Outbound window through which the host destination buffer is written */
	dst_addr = pci_epc_mem_alloc_addr(epc, &dst_phys_addr, reg->size);
	if (!dst_addr) {
		dev_err(dev, "Failed to allocate destination address\n");
		reg->status = STATUS_DST_ADDR_INVALID;
		ret = -ENOMEM;
		goto err_src_map_addr;
	}

	ret = pci_epc_map_addr(epc, epf->func_no, dst_phys_addr, reg->dst_addr,
			       reg->size);
	if (ret) {
		dev_err(dev, "Failed to map destination address\n");
		reg->status = STATUS_DST_ADDR_INVALID;
		goto err_dst_addr;
	}

	ktime_get_ts64(&start);
	use_dma = !!(reg->flags & FLAG_USE_DMA);
	if (use_dma) {
		if (!epf_test->dma_supported) {
			dev_err(dev, "Cannot transfer data using DMA\n");
			ret = -EINVAL;
			goto err_map_addr;
		}

		/* Direct window-to-window copy through the memcpy DMA channel */
		ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr,
						 src_phys_addr, reg->size);
		if (ret)
			dev_err(dev, "Data transfer failed\n");
	} else {
		void *buf;

		/* CPU copy goes through an intermediate kernel bounce buffer */
		buf = kzalloc(reg->size, GFP_KERNEL);
		if (!buf) {
			ret = -ENOMEM;
			goto err_map_addr;
		}

		memcpy_fromio(buf, src_addr, reg->size);
		memcpy_toio(dst_addr, buf, reg->size);
		kfree(buf);
	}
	ktime_get_ts64(&end);
	pci_epf_test_print_rate("COPY", reg->size, &start, &end, use_dma);

	/* Unwind resources in reverse order of acquisition */
err_map_addr:
	pci_epc_unmap_addr(epc, epf->func_no, dst_phys_addr);

err_dst_addr:
	pci_epc_mem_free_addr(epc, dst_phys_addr, dst_addr, reg->size);

err_src_map_addr:
	pci_epc_unmap_addr(epc, epf->func_no, src_phys_addr);

err_src_addr:
	pci_epc_mem_free_addr(epc, src_phys_addr, src_addr, reg->size);

err:
	return ret;
}
318*4882a593Smuzhiyun 
/*
 * Handle COMMAND_READ: read reg->size bytes from the host buffer at
 * reg->src_addr into a local buffer (via DMA or memcpy_fromio()) and verify
 * the data against the CRC32 stored in reg->checksum.
 * Returns 0 on success, negative errno on failure (-EIO on checksum
 * mismatch).
 */
static int pci_epf_test_read(struct pci_epf_test *epf_test)
{
	int ret;
	void __iomem *src_addr;
	void *buf;
	u32 crc32;
	bool use_dma;
	phys_addr_t phys_addr;
	phys_addr_t dst_phys_addr;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;
	/* DMA mappings are created against the EPC's parent device */
	struct device *dma_dev = epf->epc->dev.parent;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];

	/* Outbound window through which the host source buffer is read */
	src_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
	if (!src_addr) {
		dev_err(dev, "Failed to allocate address\n");
		reg->status = STATUS_SRC_ADDR_INVALID;
		ret = -ENOMEM;
		goto err;
	}

	ret = pci_epc_map_addr(epc, epf->func_no, phys_addr, reg->src_addr,
			       reg->size);
	if (ret) {
		dev_err(dev, "Failed to map address\n");
		reg->status = STATUS_SRC_ADDR_INVALID;
		goto err_addr;
	}

	buf = kzalloc(reg->size, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto err_map_addr;
	}

	use_dma = !!(reg->flags & FLAG_USE_DMA);
	if (use_dma) {
		if (!epf_test->dma_supported) {
			dev_err(dev, "Cannot transfer data using DMA\n");
			ret = -EINVAL;
			goto err_dma_map;
		}

		/* The local buffer is the DMA destination */
		dst_phys_addr = dma_map_single(dma_dev, buf, reg->size,
					       DMA_FROM_DEVICE);
		if (dma_mapping_error(dma_dev, dst_phys_addr)) {
			dev_err(dev, "Failed to map destination buffer addr\n");
			ret = -ENOMEM;
			goto err_dma_map;
		}

		ktime_get_ts64(&start);
		ret = pci_epf_test_data_transfer(epf_test, dst_phys_addr,
						 phys_addr, reg->size);
		if (ret)
			dev_err(dev, "Data transfer failed\n");
		ktime_get_ts64(&end);

		dma_unmap_single(dma_dev, dst_phys_addr, reg->size,
				 DMA_FROM_DEVICE);
	} else {
		ktime_get_ts64(&start);
		memcpy_fromio(buf, src_addr, reg->size);
		ktime_get_ts64(&end);
	}

	pci_epf_test_print_rate("READ", reg->size, &start, &end, use_dma);

	/* Data is only valid if it matches the checksum in the register set */
	crc32 = crc32_le(~0, buf, reg->size);
	if (crc32 != reg->checksum)
		ret = -EIO;

err_dma_map:
	kfree(buf);

err_map_addr:
	pci_epc_unmap_addr(epc, epf->func_no, phys_addr);

err_addr:
	pci_epc_mem_free_addr(epc, phys_addr, src_addr, reg->size);

err:
	return ret;
}
407*4882a593Smuzhiyun 
/*
 * Handle COMMAND_WRITE: fill a local buffer with random bytes, publish its
 * CRC32 in reg->checksum, then write reg->size bytes to the host buffer at
 * reg->dst_addr using either the DMA engine or memcpy_toio().
 * Returns 0 on success, negative errno on failure.
 */
static int pci_epf_test_write(struct pci_epf_test *epf_test)
{
	int ret;
	void __iomem *dst_addr;
	void *buf;
	bool use_dma;
	phys_addr_t phys_addr;
	phys_addr_t src_phys_addr;
	struct timespec64 start, end;
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;
	/* DMA mappings are created against the EPC's parent device */
	struct device *dma_dev = epf->epc->dev.parent;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];

	/* Outbound window through which the host destination buffer is written */
	dst_addr = pci_epc_mem_alloc_addr(epc, &phys_addr, reg->size);
	if (!dst_addr) {
		dev_err(dev, "Failed to allocate address\n");
		reg->status = STATUS_DST_ADDR_INVALID;
		ret = -ENOMEM;
		goto err;
	}

	ret = pci_epc_map_addr(epc, epf->func_no, phys_addr, reg->dst_addr,
			       reg->size);
	if (ret) {
		dev_err(dev, "Failed to map address\n");
		reg->status = STATUS_DST_ADDR_INVALID;
		goto err_addr;
	}

	buf = kzalloc(reg->size, GFP_KERNEL);
	if (!buf) {
		ret = -ENOMEM;
		goto err_map_addr;
	}

	/* Random payload; the checksum lets the other side verify it */
	get_random_bytes(buf, reg->size);
	reg->checksum = crc32_le(~0, buf, reg->size);

	use_dma = !!(reg->flags & FLAG_USE_DMA);
	if (use_dma) {
		if (!epf_test->dma_supported) {
			dev_err(dev, "Cannot transfer data using DMA\n");
			ret = -EINVAL;
			goto err_dma_map;
		}

		/* The local buffer is the DMA source */
		src_phys_addr = dma_map_single(dma_dev, buf, reg->size,
					       DMA_TO_DEVICE);
		if (dma_mapping_error(dma_dev, src_phys_addr)) {
			dev_err(dev, "Failed to map source buffer addr\n");
			ret = -ENOMEM;
			goto err_dma_map;
		}

		ktime_get_ts64(&start);
		ret = pci_epf_test_data_transfer(epf_test, phys_addr,
						 src_phys_addr, reg->size);
		if (ret)
			dev_err(dev, "Data transfer failed\n");
		ktime_get_ts64(&end);

		dma_unmap_single(dma_dev, src_phys_addr, reg->size,
				 DMA_TO_DEVICE);
	} else {
		ktime_get_ts64(&start);
		memcpy_toio(dst_addr, buf, reg->size);
		ktime_get_ts64(&end);
	}

	pci_epf_test_print_rate("WRITE", reg->size, &start, &end, use_dma);

	/*
	 * Wait 1ms in order for the write to complete. Without this delay an
	 * L3 error is observed in the host system.
	 */
	usleep_range(1000, 2000);

err_dma_map:
	kfree(buf);

err_map_addr:
	pci_epc_unmap_addr(epc, epf->func_no, phys_addr);

err_addr:
	pci_epc_mem_free_addr(epc, phys_addr, dst_addr, reg->size);

err:
	return ret;
}
500*4882a593Smuzhiyun 
/*
 * Notify the host of a completed operation using the interrupt type it
 * selected, after setting STATUS_IRQ_RAISED in the status register.
 */
static void pci_epf_test_raise_irq(struct pci_epf_test *epf_test, u8 irq_type,
				   u16 irq)
{
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];

	/* Make the status visible before the interrupt reaches the host */
	reg->status |= STATUS_IRQ_RAISED;

	if (irq_type == IRQ_TYPE_LEGACY) {
		/* Legacy INTx carries no vector number */
		pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_LEGACY, 0);
	} else if (irq_type == IRQ_TYPE_MSI) {
		pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSI, irq);
	} else if (irq_type == IRQ_TYPE_MSIX) {
		pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSIX, irq);
	} else {
		dev_err(dev, "Failed to raise IRQ, unknown type\n");
	}
}
527*4882a593Smuzhiyun 
/*
 * Delayed-work handler that polls the command register, dispatches the
 * requested operation (raise IRQ, read, write or copy), updates the status
 * register and notifies the host.  It always re-queues itself with a 1 ms
 * delay, so exactly one command bit is serviced per invocation.
 */
static void pci_epf_test_cmd_handler(struct work_struct *work)
{
	int ret;
	int count;
	u32 command;
	struct pci_epf_test *epf_test = container_of(work, struct pci_epf_test,
						     cmd_handler.work);
	struct pci_epf *epf = epf_test->epf;
	struct device *dev = &epf->dev;
	struct pci_epc *epc = epf->epc;
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	struct pci_epf_test_reg *reg = epf_test->reg[test_reg_bar];

	command = reg->command;
	if (!command)
		goto reset_handler;

	/* Consume the command and clear stale status before executing it */
	reg->command = 0;
	reg->status = 0;

	if (reg->irq_type > IRQ_TYPE_MSIX) {
		dev_err(dev, "Failed to detect IRQ type\n");
		goto reset_handler;
	}

	if (command & COMMAND_RAISE_LEGACY_IRQ) {
		reg->status = STATUS_IRQ_RAISED;
		pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_LEGACY, 0);
		goto reset_handler;
	}

	if (command & COMMAND_WRITE) {
		ret = pci_epf_test_write(epf_test);
		if (ret)
			reg->status |= STATUS_WRITE_FAIL;
		else
			reg->status |= STATUS_WRITE_SUCCESS;
		pci_epf_test_raise_irq(epf_test, reg->irq_type,
				       reg->irq_number);
		goto reset_handler;
	}

	if (command & COMMAND_READ) {
		ret = pci_epf_test_read(epf_test);
		if (!ret)
			reg->status |= STATUS_READ_SUCCESS;
		else
			reg->status |= STATUS_READ_FAIL;
		pci_epf_test_raise_irq(epf_test, reg->irq_type,
				       reg->irq_number);
		goto reset_handler;
	}

	if (command & COMMAND_COPY) {
		ret = pci_epf_test_copy(epf_test);
		if (!ret)
			reg->status |= STATUS_COPY_SUCCESS;
		else
			reg->status |= STATUS_COPY_FAIL;
		pci_epf_test_raise_irq(epf_test, reg->irq_type,
				       reg->irq_number);
		goto reset_handler;
	}

	if (command & COMMAND_RAISE_MSI_IRQ) {
		/* Silently ignore vectors beyond what is actually configured */
		count = pci_epc_get_msi(epc, epf->func_no);
		if (reg->irq_number > count || count <= 0)
			goto reset_handler;
		reg->status = STATUS_IRQ_RAISED;
		pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSI,
				  reg->irq_number);
		goto reset_handler;
	}

	if (command & COMMAND_RAISE_MSIX_IRQ) {
		/* Silently ignore vectors beyond what is actually configured */
		count = pci_epc_get_msix(epc, epf->func_no);
		if (reg->irq_number > count || count <= 0)
			goto reset_handler;
		reg->status = STATUS_IRQ_RAISED;
		pci_epc_raise_irq(epc, epf->func_no, PCI_EPC_IRQ_MSIX,
				  reg->irq_number);
		goto reset_handler;
	}

reset_handler:
	/* Poll again in 1 ms */
	queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
			   msecs_to_jiffies(1));
}
616*4882a593Smuzhiyun 
pci_epf_test_unbind(struct pci_epf * epf)617*4882a593Smuzhiyun static void pci_epf_test_unbind(struct pci_epf *epf)
618*4882a593Smuzhiyun {
619*4882a593Smuzhiyun 	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
620*4882a593Smuzhiyun 	struct pci_epc *epc = epf->epc;
621*4882a593Smuzhiyun 	struct pci_epf_bar *epf_bar;
622*4882a593Smuzhiyun 	int bar;
623*4882a593Smuzhiyun 
624*4882a593Smuzhiyun 	cancel_delayed_work(&epf_test->cmd_handler);
625*4882a593Smuzhiyun 	pci_epf_test_clean_dma_chan(epf_test);
626*4882a593Smuzhiyun 	for (bar = 0; bar < PCI_STD_NUM_BARS; bar++) {
627*4882a593Smuzhiyun 		epf_bar = &epf->bar[bar];
628*4882a593Smuzhiyun 
629*4882a593Smuzhiyun 		if (epf_test->reg[bar]) {
630*4882a593Smuzhiyun 			pci_epc_clear_bar(epc, epf->func_no, epf_bar);
631*4882a593Smuzhiyun 			pci_epf_free_space(epf, epf_test->reg[bar], bar);
632*4882a593Smuzhiyun 		}
633*4882a593Smuzhiyun 	}
634*4882a593Smuzhiyun }
635*4882a593Smuzhiyun 
/*
 * Program every non-reserved BAR of the function into the endpoint
 * controller.  A set_bar() failure is fatal only for the BAR that carries
 * the test registers; any other failing BAR is freed and skipped.
 * Returns 0 on success or the pci_epc_set_bar() error for the test BAR.
 */
static int pci_epf_test_set_bar(struct pci_epf *epf)
{
	int bar, add;
	int ret;
	struct pci_epf_bar *epf_bar;
	struct pci_epc *epc = epf->epc;
	struct device *dev = &epf->dev;
	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
	const struct pci_epc_features *epc_features;

	epc_features = epf_test->epc_features;

	for (bar = 0; bar < PCI_STD_NUM_BARS; bar += add) {
		epf_bar = &epf->bar[bar];
		/*
		 * pci_epc_set_bar() sets PCI_BASE_ADDRESS_MEM_TYPE_64
		 * if the specific implementation required a 64-bit BAR,
		 * even if we only requested a 32-bit BAR.
		 * A 64-bit BAR consumes the following BAR slot too.
		 */
		add = (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ? 2 : 1;

		/* Skip BARs the controller reserves for its own use */
		if (!!(epc_features->reserved_bar & (1 << bar)))
			continue;

		ret = pci_epc_set_bar(epc, epf->func_no, epf_bar);
		if (ret) {
			pci_epf_free_space(epf, epf_test->reg[bar], bar);
			dev_err(dev, "Failed to set BAR%d\n", bar);
			/* Only the test register BAR is essential */
			if (bar == test_reg_bar)
				return ret;
		}
	}

	return 0;
}
672*4882a593Smuzhiyun 
pci_epf_test_core_init(struct pci_epf * epf)673*4882a593Smuzhiyun static int pci_epf_test_core_init(struct pci_epf *epf)
674*4882a593Smuzhiyun {
675*4882a593Smuzhiyun 	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
676*4882a593Smuzhiyun 	struct pci_epf_header *header = epf->header;
677*4882a593Smuzhiyun 	const struct pci_epc_features *epc_features;
678*4882a593Smuzhiyun 	struct pci_epc *epc = epf->epc;
679*4882a593Smuzhiyun 	struct device *dev = &epf->dev;
680*4882a593Smuzhiyun 	bool msix_capable = false;
681*4882a593Smuzhiyun 	bool msi_capable = true;
682*4882a593Smuzhiyun 	int ret;
683*4882a593Smuzhiyun 
684*4882a593Smuzhiyun 	epc_features = pci_epc_get_features(epc, epf->func_no);
685*4882a593Smuzhiyun 	if (epc_features) {
686*4882a593Smuzhiyun 		msix_capable = epc_features->msix_capable;
687*4882a593Smuzhiyun 		msi_capable = epc_features->msi_capable;
688*4882a593Smuzhiyun 	}
689*4882a593Smuzhiyun 
690*4882a593Smuzhiyun 	ret = pci_epc_write_header(epc, epf->func_no, header);
691*4882a593Smuzhiyun 	if (ret) {
692*4882a593Smuzhiyun 		dev_err(dev, "Configuration header write failed\n");
693*4882a593Smuzhiyun 		return ret;
694*4882a593Smuzhiyun 	}
695*4882a593Smuzhiyun 
696*4882a593Smuzhiyun 	ret = pci_epf_test_set_bar(epf);
697*4882a593Smuzhiyun 	if (ret)
698*4882a593Smuzhiyun 		return ret;
699*4882a593Smuzhiyun 
700*4882a593Smuzhiyun 	if (msi_capable) {
701*4882a593Smuzhiyun 		ret = pci_epc_set_msi(epc, epf->func_no, epf->msi_interrupts);
702*4882a593Smuzhiyun 		if (ret) {
703*4882a593Smuzhiyun 			dev_err(dev, "MSI configuration failed\n");
704*4882a593Smuzhiyun 			return ret;
705*4882a593Smuzhiyun 		}
706*4882a593Smuzhiyun 	}
707*4882a593Smuzhiyun 
708*4882a593Smuzhiyun 	if (msix_capable) {
709*4882a593Smuzhiyun 		ret = pci_epc_set_msix(epc, epf->func_no, epf->msix_interrupts,
710*4882a593Smuzhiyun 				       epf_test->test_reg_bar,
711*4882a593Smuzhiyun 				       epf_test->msix_table_offset);
712*4882a593Smuzhiyun 		if (ret) {
713*4882a593Smuzhiyun 			dev_err(dev, "MSI-X configuration failed\n");
714*4882a593Smuzhiyun 			return ret;
715*4882a593Smuzhiyun 		}
716*4882a593Smuzhiyun 	}
717*4882a593Smuzhiyun 
718*4882a593Smuzhiyun 	return 0;
719*4882a593Smuzhiyun }
720*4882a593Smuzhiyun 
pci_epf_test_notifier(struct notifier_block * nb,unsigned long val,void * data)721*4882a593Smuzhiyun static int pci_epf_test_notifier(struct notifier_block *nb, unsigned long val,
722*4882a593Smuzhiyun 				 void *data)
723*4882a593Smuzhiyun {
724*4882a593Smuzhiyun 	struct pci_epf *epf = container_of(nb, struct pci_epf, nb);
725*4882a593Smuzhiyun 	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
726*4882a593Smuzhiyun 	int ret;
727*4882a593Smuzhiyun 
728*4882a593Smuzhiyun 	switch (val) {
729*4882a593Smuzhiyun 	case CORE_INIT:
730*4882a593Smuzhiyun 		ret = pci_epf_test_core_init(epf);
731*4882a593Smuzhiyun 		if (ret)
732*4882a593Smuzhiyun 			return NOTIFY_BAD;
733*4882a593Smuzhiyun 		break;
734*4882a593Smuzhiyun 
735*4882a593Smuzhiyun 	case LINK_UP:
736*4882a593Smuzhiyun 		queue_delayed_work(kpcitest_workqueue, &epf_test->cmd_handler,
737*4882a593Smuzhiyun 				   msecs_to_jiffies(1));
738*4882a593Smuzhiyun 		break;
739*4882a593Smuzhiyun 
740*4882a593Smuzhiyun 	default:
741*4882a593Smuzhiyun 		dev_err(&epf->dev, "Invalid EPF test notifier event\n");
742*4882a593Smuzhiyun 		return NOTIFY_BAD;
743*4882a593Smuzhiyun 	}
744*4882a593Smuzhiyun 
745*4882a593Smuzhiyun 	return NOTIFY_OK;
746*4882a593Smuzhiyun }
747*4882a593Smuzhiyun 
pci_epf_test_alloc_space(struct pci_epf * epf)748*4882a593Smuzhiyun static int pci_epf_test_alloc_space(struct pci_epf *epf)
749*4882a593Smuzhiyun {
750*4882a593Smuzhiyun 	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
751*4882a593Smuzhiyun 	struct device *dev = &epf->dev;
752*4882a593Smuzhiyun 	struct pci_epf_bar *epf_bar;
753*4882a593Smuzhiyun 	size_t msix_table_size = 0;
754*4882a593Smuzhiyun 	size_t test_reg_bar_size;
755*4882a593Smuzhiyun 	size_t pba_size = 0;
756*4882a593Smuzhiyun 	bool msix_capable;
757*4882a593Smuzhiyun 	void *base;
758*4882a593Smuzhiyun 	int bar, add;
759*4882a593Smuzhiyun 	enum pci_barno test_reg_bar = epf_test->test_reg_bar;
760*4882a593Smuzhiyun 	const struct pci_epc_features *epc_features;
761*4882a593Smuzhiyun 	size_t test_reg_size;
762*4882a593Smuzhiyun 
763*4882a593Smuzhiyun 	epc_features = epf_test->epc_features;
764*4882a593Smuzhiyun 
765*4882a593Smuzhiyun 	test_reg_bar_size = ALIGN(sizeof(struct pci_epf_test_reg), 128);
766*4882a593Smuzhiyun 
767*4882a593Smuzhiyun 	msix_capable = epc_features->msix_capable;
768*4882a593Smuzhiyun 	if (msix_capable) {
769*4882a593Smuzhiyun 		msix_table_size = PCI_MSIX_ENTRY_SIZE * epf->msix_interrupts;
770*4882a593Smuzhiyun 		epf_test->msix_table_offset = test_reg_bar_size;
771*4882a593Smuzhiyun 		/* Align to QWORD or 8 Bytes */
772*4882a593Smuzhiyun 		pba_size = ALIGN(DIV_ROUND_UP(epf->msix_interrupts, 8), 8);
773*4882a593Smuzhiyun 	}
774*4882a593Smuzhiyun 	test_reg_size = test_reg_bar_size + msix_table_size + pba_size;
775*4882a593Smuzhiyun 
776*4882a593Smuzhiyun 	if (epc_features->bar_fixed_size[test_reg_bar]) {
777*4882a593Smuzhiyun 		if (test_reg_size > bar_size[test_reg_bar])
778*4882a593Smuzhiyun 			return -ENOMEM;
779*4882a593Smuzhiyun 		test_reg_size = bar_size[test_reg_bar];
780*4882a593Smuzhiyun 	}
781*4882a593Smuzhiyun 
782*4882a593Smuzhiyun 	base = pci_epf_alloc_space(epf, test_reg_size, test_reg_bar,
783*4882a593Smuzhiyun 				   epc_features->align);
784*4882a593Smuzhiyun 	if (!base) {
785*4882a593Smuzhiyun 		dev_err(dev, "Failed to allocated register space\n");
786*4882a593Smuzhiyun 		return -ENOMEM;
787*4882a593Smuzhiyun 	}
788*4882a593Smuzhiyun 	epf_test->reg[test_reg_bar] = base;
789*4882a593Smuzhiyun 
790*4882a593Smuzhiyun 	for (bar = 0; bar < PCI_STD_NUM_BARS; bar += add) {
791*4882a593Smuzhiyun 		epf_bar = &epf->bar[bar];
792*4882a593Smuzhiyun 		add = (epf_bar->flags & PCI_BASE_ADDRESS_MEM_TYPE_64) ? 2 : 1;
793*4882a593Smuzhiyun 
794*4882a593Smuzhiyun 		if (bar == test_reg_bar)
795*4882a593Smuzhiyun 			continue;
796*4882a593Smuzhiyun 
797*4882a593Smuzhiyun 		if (!!(epc_features->reserved_bar & (1 << bar)))
798*4882a593Smuzhiyun 			continue;
799*4882a593Smuzhiyun 
800*4882a593Smuzhiyun 		base = pci_epf_alloc_space(epf, bar_size[bar], bar,
801*4882a593Smuzhiyun 					   epc_features->align);
802*4882a593Smuzhiyun 		if (!base)
803*4882a593Smuzhiyun 			dev_err(dev, "Failed to allocate space for BAR%d\n",
804*4882a593Smuzhiyun 				bar);
805*4882a593Smuzhiyun 		epf_test->reg[bar] = base;
806*4882a593Smuzhiyun 	}
807*4882a593Smuzhiyun 
808*4882a593Smuzhiyun 	return 0;
809*4882a593Smuzhiyun }
810*4882a593Smuzhiyun 
pci_epf_configure_bar(struct pci_epf * epf,const struct pci_epc_features * epc_features)811*4882a593Smuzhiyun static void pci_epf_configure_bar(struct pci_epf *epf,
812*4882a593Smuzhiyun 				  const struct pci_epc_features *epc_features)
813*4882a593Smuzhiyun {
814*4882a593Smuzhiyun 	struct pci_epf_bar *epf_bar;
815*4882a593Smuzhiyun 	bool bar_fixed_64bit;
816*4882a593Smuzhiyun 	int i;
817*4882a593Smuzhiyun 
818*4882a593Smuzhiyun 	for (i = 0; i < PCI_STD_NUM_BARS; i++) {
819*4882a593Smuzhiyun 		epf_bar = &epf->bar[i];
820*4882a593Smuzhiyun 		bar_fixed_64bit = !!(epc_features->bar_fixed_64bit & (1 << i));
821*4882a593Smuzhiyun 		if (bar_fixed_64bit)
822*4882a593Smuzhiyun 			epf_bar->flags |= PCI_BASE_ADDRESS_MEM_TYPE_64;
823*4882a593Smuzhiyun 		if (epc_features->bar_fixed_size[i])
824*4882a593Smuzhiyun 			bar_size[i] = epc_features->bar_fixed_size[i];
825*4882a593Smuzhiyun 	}
826*4882a593Smuzhiyun }
827*4882a593Smuzhiyun 
pci_epf_test_bind(struct pci_epf * epf)828*4882a593Smuzhiyun static int pci_epf_test_bind(struct pci_epf *epf)
829*4882a593Smuzhiyun {
830*4882a593Smuzhiyun 	int ret;
831*4882a593Smuzhiyun 	struct pci_epf_test *epf_test = epf_get_drvdata(epf);
832*4882a593Smuzhiyun 	const struct pci_epc_features *epc_features;
833*4882a593Smuzhiyun 	enum pci_barno test_reg_bar = BAR_0;
834*4882a593Smuzhiyun 	struct pci_epc *epc = epf->epc;
835*4882a593Smuzhiyun 	bool linkup_notifier = false;
836*4882a593Smuzhiyun 	bool core_init_notifier = false;
837*4882a593Smuzhiyun 
838*4882a593Smuzhiyun 	if (WARN_ON_ONCE(!epc))
839*4882a593Smuzhiyun 		return -EINVAL;
840*4882a593Smuzhiyun 
841*4882a593Smuzhiyun 	epc_features = pci_epc_get_features(epc, epf->func_no);
842*4882a593Smuzhiyun 	if (!epc_features) {
843*4882a593Smuzhiyun 		dev_err(&epf->dev, "epc_features not implemented\n");
844*4882a593Smuzhiyun 		return -EOPNOTSUPP;
845*4882a593Smuzhiyun 	}
846*4882a593Smuzhiyun 
847*4882a593Smuzhiyun 	linkup_notifier = epc_features->linkup_notifier;
848*4882a593Smuzhiyun 	core_init_notifier = epc_features->core_init_notifier;
849*4882a593Smuzhiyun 	test_reg_bar = pci_epc_get_first_free_bar(epc_features);
850*4882a593Smuzhiyun 	if (test_reg_bar < 0)
851*4882a593Smuzhiyun 		return -EINVAL;
852*4882a593Smuzhiyun 	pci_epf_configure_bar(epf, epc_features);
853*4882a593Smuzhiyun 
854*4882a593Smuzhiyun 	epf_test->test_reg_bar = test_reg_bar;
855*4882a593Smuzhiyun 	epf_test->epc_features = epc_features;
856*4882a593Smuzhiyun 
857*4882a593Smuzhiyun 	ret = pci_epf_test_alloc_space(epf);
858*4882a593Smuzhiyun 	if (ret)
859*4882a593Smuzhiyun 		return ret;
860*4882a593Smuzhiyun 
861*4882a593Smuzhiyun 	if (!core_init_notifier) {
862*4882a593Smuzhiyun 		ret = pci_epf_test_core_init(epf);
863*4882a593Smuzhiyun 		if (ret)
864*4882a593Smuzhiyun 			return ret;
865*4882a593Smuzhiyun 	}
866*4882a593Smuzhiyun 
867*4882a593Smuzhiyun 	epf_test->dma_supported = true;
868*4882a593Smuzhiyun 
869*4882a593Smuzhiyun 	ret = pci_epf_test_init_dma_chan(epf_test);
870*4882a593Smuzhiyun 	if (ret)
871*4882a593Smuzhiyun 		epf_test->dma_supported = false;
872*4882a593Smuzhiyun 
873*4882a593Smuzhiyun 	if (linkup_notifier) {
874*4882a593Smuzhiyun 		epf->nb.notifier_call = pci_epf_test_notifier;
875*4882a593Smuzhiyun 		pci_epc_register_notifier(epc, &epf->nb);
876*4882a593Smuzhiyun 	} else {
877*4882a593Smuzhiyun 		queue_work(kpcitest_workqueue, &epf_test->cmd_handler.work);
878*4882a593Smuzhiyun 	}
879*4882a593Smuzhiyun 
880*4882a593Smuzhiyun 	return 0;
881*4882a593Smuzhiyun }
882*4882a593Smuzhiyun 
/* Device-ID table: matches EPF devices created with the name "pci_epf_test". */
static const struct pci_epf_device_id pci_epf_test_ids[] = {
	{
		.name = "pci_epf_test",
	},
	{},
};
889*4882a593Smuzhiyun 
pci_epf_test_probe(struct pci_epf * epf)890*4882a593Smuzhiyun static int pci_epf_test_probe(struct pci_epf *epf)
891*4882a593Smuzhiyun {
892*4882a593Smuzhiyun 	struct pci_epf_test *epf_test;
893*4882a593Smuzhiyun 	struct device *dev = &epf->dev;
894*4882a593Smuzhiyun 
895*4882a593Smuzhiyun 	epf_test = devm_kzalloc(dev, sizeof(*epf_test), GFP_KERNEL);
896*4882a593Smuzhiyun 	if (!epf_test)
897*4882a593Smuzhiyun 		return -ENOMEM;
898*4882a593Smuzhiyun 
899*4882a593Smuzhiyun 	epf->header = &test_header;
900*4882a593Smuzhiyun 	epf_test->epf = epf;
901*4882a593Smuzhiyun 
902*4882a593Smuzhiyun 	INIT_DELAYED_WORK(&epf_test->cmd_handler, pci_epf_test_cmd_handler);
903*4882a593Smuzhiyun 
904*4882a593Smuzhiyun 	epf_set_drvdata(epf, epf_test);
905*4882a593Smuzhiyun 	return 0;
906*4882a593Smuzhiyun }
907*4882a593Smuzhiyun 
/* EPF core callbacks, invoked when the function is (un)bound to an EPC. */
static struct pci_epf_ops ops = {
	.unbind	= pci_epf_test_unbind,
	.bind	= pci_epf_test_bind,
};
912*4882a593Smuzhiyun 
/* EPF driver registration record for the test function. */
static struct pci_epf_driver test_driver = {
	.driver.name	= "pci_epf_test",
	.probe		= pci_epf_test_probe,
	.id_table	= pci_epf_test_ids,
	.ops		= &ops,
	.owner		= THIS_MODULE,
};
920*4882a593Smuzhiyun 
pci_epf_test_init(void)921*4882a593Smuzhiyun static int __init pci_epf_test_init(void)
922*4882a593Smuzhiyun {
923*4882a593Smuzhiyun 	int ret;
924*4882a593Smuzhiyun 
925*4882a593Smuzhiyun 	kpcitest_workqueue = alloc_workqueue("kpcitest",
926*4882a593Smuzhiyun 					     WQ_MEM_RECLAIM | WQ_HIGHPRI, 0);
927*4882a593Smuzhiyun 	if (!kpcitest_workqueue) {
928*4882a593Smuzhiyun 		pr_err("Failed to allocate the kpcitest work queue\n");
929*4882a593Smuzhiyun 		return -ENOMEM;
930*4882a593Smuzhiyun 	}
931*4882a593Smuzhiyun 
932*4882a593Smuzhiyun 	ret = pci_epf_register_driver(&test_driver);
933*4882a593Smuzhiyun 	if (ret) {
934*4882a593Smuzhiyun 		destroy_workqueue(kpcitest_workqueue);
935*4882a593Smuzhiyun 		pr_err("Failed to register pci epf test driver --> %d\n", ret);
936*4882a593Smuzhiyun 		return ret;
937*4882a593Smuzhiyun 	}
938*4882a593Smuzhiyun 
939*4882a593Smuzhiyun 	return 0;
940*4882a593Smuzhiyun }
941*4882a593Smuzhiyun module_init(pci_epf_test_init);
942*4882a593Smuzhiyun 
/* Module exit: destroy the command workqueue, then unregister the driver. */
static void __exit pci_epf_test_exit(void)
{
	/* NOTE(review): NULL check looks defensive — init fails (and the
	 * module never loads) when allocation fails, so the queue should
	 * always exist here. */
	if (kpcitest_workqueue)
		destroy_workqueue(kpcitest_workqueue);
	pci_epf_unregister_driver(&test_driver);
}
module_exit(pci_epf_test_exit);
950*4882a593Smuzhiyun 
/* Module metadata. */
MODULE_DESCRIPTION("PCI EPF TEST DRIVER");
MODULE_AUTHOR("Kishon Vijay Abraham I <kishon@ti.com>");
MODULE_LICENSE("GPL v2");
954