xref: /OK3568_Linux_fs/kernel/drivers/fpga/altera-cvp.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * FPGA Manager Driver for Altera Arria/Cyclone/Stratix CvP
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Copyright (C) 2017 DENX Software Engineering
6*4882a593Smuzhiyun  *
7*4882a593Smuzhiyun  * Anatolij Gustschin <agust@denx.de>
8*4882a593Smuzhiyun  *
9*4882a593Smuzhiyun  * Manage Altera FPGA firmware using PCIe CvP.
10*4882a593Smuzhiyun  * Firmware must be in binary "rbf" format.
11*4882a593Smuzhiyun  */
12*4882a593Smuzhiyun 
13*4882a593Smuzhiyun #include <linux/delay.h>
14*4882a593Smuzhiyun #include <linux/device.h>
15*4882a593Smuzhiyun #include <linux/fpga/fpga-mgr.h>
16*4882a593Smuzhiyun #include <linux/module.h>
17*4882a593Smuzhiyun #include <linux/pci.h>
18*4882a593Smuzhiyun #include <linux/sizes.h>
19*4882a593Smuzhiyun 
20*4882a593Smuzhiyun #define CVP_BAR		0	/* BAR used for data transfer in memory mode */
21*4882a593Smuzhiyun #define CVP_DUMMY_WR	244	/* dummy writes to clear CvP state machine */
22*4882a593Smuzhiyun #define TIMEOUT_US	2000	/* CVP STATUS timeout for USERMODE polling */
23*4882a593Smuzhiyun 
24*4882a593Smuzhiyun /* Vendor Specific Extended Capability Registers */
25*4882a593Smuzhiyun #define VSE_PCIE_EXT_CAP_ID		0x0
26*4882a593Smuzhiyun #define VSE_PCIE_EXT_CAP_ID_VAL		0x000b	/* 16bit */
27*4882a593Smuzhiyun 
28*4882a593Smuzhiyun #define VSE_CVP_STATUS			0x1c	/* 32bit */
29*4882a593Smuzhiyun #define VSE_CVP_STATUS_CFG_RDY		BIT(18)	/* CVP_CONFIG_READY */
30*4882a593Smuzhiyun #define VSE_CVP_STATUS_CFG_ERR		BIT(19)	/* CVP_CONFIG_ERROR */
31*4882a593Smuzhiyun #define VSE_CVP_STATUS_CVP_EN		BIT(20)	/* ctrl block is enabling CVP */
32*4882a593Smuzhiyun #define VSE_CVP_STATUS_USERMODE		BIT(21)	/* USERMODE */
33*4882a593Smuzhiyun #define VSE_CVP_STATUS_CFG_DONE		BIT(23)	/* CVP_CONFIG_DONE */
34*4882a593Smuzhiyun #define VSE_CVP_STATUS_PLD_CLK_IN_USE	BIT(24)	/* PLD_CLK_IN_USE */
35*4882a593Smuzhiyun 
36*4882a593Smuzhiyun #define VSE_CVP_MODE_CTRL		0x20	/* 32bit */
37*4882a593Smuzhiyun #define VSE_CVP_MODE_CTRL_CVP_MODE	BIT(0)	/* CVP (1) or normal mode (0) */
38*4882a593Smuzhiyun #define VSE_CVP_MODE_CTRL_HIP_CLK_SEL	BIT(1) /* PMA (1) or fabric clock (0) */
39*4882a593Smuzhiyun #define VSE_CVP_MODE_CTRL_NUMCLKS_OFF	8	/* NUMCLKS bits offset */
40*4882a593Smuzhiyun #define VSE_CVP_MODE_CTRL_NUMCLKS_MASK	GENMASK(15, 8)
41*4882a593Smuzhiyun 
42*4882a593Smuzhiyun #define VSE_CVP_DATA			0x28	/* 32bit */
43*4882a593Smuzhiyun #define VSE_CVP_PROG_CTRL		0x2c	/* 32bit */
44*4882a593Smuzhiyun #define VSE_CVP_PROG_CTRL_CONFIG	BIT(0)
45*4882a593Smuzhiyun #define VSE_CVP_PROG_CTRL_START_XFER	BIT(1)
46*4882a593Smuzhiyun #define VSE_CVP_PROG_CTRL_MASK		GENMASK(1, 0)
47*4882a593Smuzhiyun 
48*4882a593Smuzhiyun #define VSE_UNCOR_ERR_STATUS		0x34	/* 32bit */
49*4882a593Smuzhiyun #define VSE_UNCOR_ERR_CVP_CFG_ERR	BIT(5)	/* CVP_CONFIG_ERROR_LATCHED */
50*4882a593Smuzhiyun 
51*4882a593Smuzhiyun #define V1_VSEC_OFFSET			0x200	/* Vendor Specific Offset V1 */
52*4882a593Smuzhiyun /* V2 Defines */
53*4882a593Smuzhiyun #define VSE_CVP_TX_CREDITS		0x49	/* 8bit */
54*4882a593Smuzhiyun 
55*4882a593Smuzhiyun #define V2_CREDIT_TIMEOUT_US		20000
56*4882a593Smuzhiyun #define V2_CHECK_CREDIT_US		10
57*4882a593Smuzhiyun #define V2_POLL_TIMEOUT_US		1000000
58*4882a593Smuzhiyun #define V2_USER_TIMEOUT_US		500000
59*4882a593Smuzhiyun 
60*4882a593Smuzhiyun #define V1_POLL_TIMEOUT_US		10
61*4882a593Smuzhiyun 
62*4882a593Smuzhiyun #define DRV_NAME		"altera-cvp"
63*4882a593Smuzhiyun #define ALTERA_CVP_MGR_NAME	"Altera CvP FPGA Manager"
64*4882a593Smuzhiyun 
65*4882a593Smuzhiyun /* Write block sizes */
66*4882a593Smuzhiyun #define ALTERA_CVP_V1_SIZE	4
67*4882a593Smuzhiyun #define ALTERA_CVP_V2_SIZE	4096
68*4882a593Smuzhiyun 
/*
 * Optional CvP config error status check for debugging.
 * Toggled at runtime through the driver's "chkcfg" sysfs attribute
 * (see chkcfg_show()/chkcfg_store() below).
 */
static bool altera_cvp_chkcfg;

struct cvp_priv;
73*4882a593Smuzhiyun 
/*
 * struct altera_cvp_conf - per-device driver state
 * @mgr:	registered FPGA manager
 * @pci_dev:	the CvP-capable PCI device
 * @map:	iomap of CVP_BAR, or NULL if mapping failed (the config-space
 *		data register is used as a fallback in that case)
 * @write_data:	writes one 32-bit word of bitstream data to the device
 * @mgr_name:	manager name registered with the FPGA manager core
 * @numclks:	clock-to-data ratio selected in altera_cvp_write_init()
 * @sent_packets: number of blocks sent so far, used for V2 credit accounting
 * @vsec_offset: config-space offset of the Vendor Specific Extended Capability
 * @priv:	CvP generation (V1/V2) specific parameters and hooks
 */
struct altera_cvp_conf {
	struct fpga_manager	*mgr;
	struct pci_dev		*pci_dev;
	void __iomem		*map;
	void			(*write_data)(struct altera_cvp_conf *conf,
					      u32 data);
	char			mgr_name[64];
	u8			numclks;
	u32			sent_packets;
	u32			vsec_offset;
	const struct cvp_priv	*priv;
};
86*4882a593Smuzhiyun 
/*
 * struct cvp_priv - CvP generation (V1/V2) specific parameters
 * @switch_clk:	 switch the HIP clock and issue dummy writes (set for V1)
 * @clear_state: reset the CvP programming state machine (set for V2)
 * @wait_credit: block until the transfer FIFO has room (set for V2)
 * @block_size:	 write granularity in bytes (4 for V1, 4096 for V2)
 * @poll_time_us: timeout for CFG_RDY polling
 * @user_time_us: timeout for USERMODE/PLD_CLK_IN_USE polling
 */
struct cvp_priv {
	void	(*switch_clk)(struct altera_cvp_conf *conf);
	int	(*clear_state)(struct altera_cvp_conf *conf);
	int	(*wait_credit)(struct fpga_manager *mgr, u32 blocks);
	size_t	block_size;
	int	poll_time_us;
	int	user_time_us;
};
95*4882a593Smuzhiyun 
/* Read one byte from the device's CvP vendor-specific capability at @where. */
static int altera_read_config_byte(struct altera_cvp_conf *conf,
				   int where, u8 *val)
{
	int pos = conf->vsec_offset + where;

	return pci_read_config_byte(conf->pci_dev, pos, val);
}
102*4882a593Smuzhiyun 
/* Read one dword from the device's CvP vendor-specific capability at @where. */
static int altera_read_config_dword(struct altera_cvp_conf *conf,
				    int where, u32 *val)
{
	int pos = conf->vsec_offset + where;

	return pci_read_config_dword(conf->pci_dev, pos, val);
}
109*4882a593Smuzhiyun 
/* Write one dword to the device's CvP vendor-specific capability at @where. */
static int altera_write_config_dword(struct altera_cvp_conf *conf,
				     int where, u32 val)
{
	int pos = conf->vsec_offset + where;

	return pci_write_config_dword(conf->pci_dev, pos, val);
}
116*4882a593Smuzhiyun 
altera_cvp_state(struct fpga_manager * mgr)117*4882a593Smuzhiyun static enum fpga_mgr_states altera_cvp_state(struct fpga_manager *mgr)
118*4882a593Smuzhiyun {
119*4882a593Smuzhiyun 	struct altera_cvp_conf *conf = mgr->priv;
120*4882a593Smuzhiyun 	u32 status;
121*4882a593Smuzhiyun 
122*4882a593Smuzhiyun 	altera_read_config_dword(conf, VSE_CVP_STATUS, &status);
123*4882a593Smuzhiyun 
124*4882a593Smuzhiyun 	if (status & VSE_CVP_STATUS_CFG_DONE)
125*4882a593Smuzhiyun 		return FPGA_MGR_STATE_OPERATING;
126*4882a593Smuzhiyun 
127*4882a593Smuzhiyun 	if (status & VSE_CVP_STATUS_CVP_EN)
128*4882a593Smuzhiyun 		return FPGA_MGR_STATE_POWER_UP;
129*4882a593Smuzhiyun 
130*4882a593Smuzhiyun 	return FPGA_MGR_STATE_UNKNOWN;
131*4882a593Smuzhiyun }
132*4882a593Smuzhiyun 
/* Write one bitstream word through the memory-mapped CVP_BAR (fast path). */
static void altera_cvp_write_data_iomem(struct altera_cvp_conf *conf, u32 val)
{
	writel(val, conf->map);
}
137*4882a593Smuzhiyun 
/*
 * Write one bitstream word through the CvP data register in PCI config
 * space - fallback path used when mapping CVP_BAR failed in probe().
 */
static void altera_cvp_write_data_config(struct altera_cvp_conf *conf, u32 val)
{
	pci_write_config_dword(conf->pci_dev, conf->vsec_offset + VSE_CVP_DATA,
			       val);
}
143*4882a593Smuzhiyun 
144*4882a593Smuzhiyun /* switches between CvP clock and internal clock */
altera_cvp_dummy_write(struct altera_cvp_conf * conf)145*4882a593Smuzhiyun static void altera_cvp_dummy_write(struct altera_cvp_conf *conf)
146*4882a593Smuzhiyun {
147*4882a593Smuzhiyun 	unsigned int i;
148*4882a593Smuzhiyun 	u32 val;
149*4882a593Smuzhiyun 
150*4882a593Smuzhiyun 	/* set 1 CVP clock cycle for every CVP Data Register Write */
151*4882a593Smuzhiyun 	altera_read_config_dword(conf, VSE_CVP_MODE_CTRL, &val);
152*4882a593Smuzhiyun 	val &= ~VSE_CVP_MODE_CTRL_NUMCLKS_MASK;
153*4882a593Smuzhiyun 	val |= 1 << VSE_CVP_MODE_CTRL_NUMCLKS_OFF;
154*4882a593Smuzhiyun 	altera_write_config_dword(conf, VSE_CVP_MODE_CTRL, val);
155*4882a593Smuzhiyun 
156*4882a593Smuzhiyun 	for (i = 0; i < CVP_DUMMY_WR; i++)
157*4882a593Smuzhiyun 		conf->write_data(conf, 0); /* dummy data, could be any value */
158*4882a593Smuzhiyun }
159*4882a593Smuzhiyun 
/*
 * Poll the CvP status register until (status & status_mask) == status_val,
 * checking roughly every 10us for up to timeout_us microseconds.
 *
 * Return: 0 on match, -ETIMEDOUT if the condition never became true.
 */
static int altera_cvp_wait_status(struct altera_cvp_conf *conf, u32 status_mask,
				  u32 status_val, int timeout_us)
{
	/* round the 10us poll intervals up so short timeouts poll at least once */
	unsigned int attempts = (timeout_us + 9) / 10;
	u32 status;

	do {
		altera_read_config_dword(conf, VSE_CVP_STATUS, &status);
		if ((status & status_mask) == status_val)
			return 0;

		/* short sleep so a state change is noticed quickly */
		usleep_range(10, 11);
	} while (--attempts);

	return -ETIMEDOUT;
}
181*4882a593Smuzhiyun 
/*
 * STEP 10 (optional) - check the CVP_CONFIG_ERROR flag.
 * @bytes is only used for the error message (bytes transferred so far).
 *
 * Return: 0 if no error is flagged, -EPROTO if the status read failed or
 * the device reports a configuration error.
 */
static int altera_cvp_chk_error(struct fpga_manager *mgr, size_t bytes)
{
	struct altera_cvp_conf *conf = mgr->priv;
	u32 status;
	int err;

	err = altera_read_config_dword(conf, VSE_CVP_STATUS, &status);
	if (!err && !(status & VSE_CVP_STATUS_CFG_ERR))
		return 0;

	dev_err(&mgr->dev, "CVP_CONFIG_ERROR after %zu bytes!\n", bytes);
	return -EPROTO;
}
197*4882a593Smuzhiyun 
198*4882a593Smuzhiyun /*
199*4882a593Smuzhiyun  * CvP Version2 Functions
200*4882a593Smuzhiyun  * Recent Intel FPGAs use a credit mechanism to throttle incoming
201*4882a593Smuzhiyun  * bitstreams and a different method of clearing the state.
202*4882a593Smuzhiyun  */
203*4882a593Smuzhiyun 
/*
 * altera_cvp_v2_clear_state() - reset the CvP V2 programming state machine.
 *
 * Clears the CVP_CONFIG and START_XFER control bits, then waits for the
 * controller to deassert CVP_CONFIG_READY.
 *
 * Return: 0 on success, a negative errno on config-space access failure,
 * or -ETIMEDOUT if CFG_RDY does not clear within poll_time_us.
 */
static int altera_cvp_v2_clear_state(struct altera_cvp_conf *conf)
{
	u32 val;
	int ret;

	/* Clear the START_XFER and CVP_CONFIG bits */
	ret = altera_read_config_dword(conf, VSE_CVP_PROG_CTRL, &val);
	if (ret) {
		dev_err(&conf->pci_dev->dev,
			"Error reading CVP Program Control Register\n");
		return ret;
	}

	val &= ~VSE_CVP_PROG_CTRL_MASK;
	ret = altera_write_config_dword(conf, VSE_CVP_PROG_CTRL, val);
	if (ret) {
		dev_err(&conf->pci_dev->dev,
			"Error writing CVP Program Control Register\n");
		return ret;
	}

	/* wait for the control block to acknowledge the reset */
	return altera_cvp_wait_status(conf, VSE_CVP_STATUS_CFG_RDY, 0,
				      conf->priv->poll_time_us);
}
228*4882a593Smuzhiyun 
/*
 * altera_cvp_v2_wait_for_credit() - wait until the CvP V2 FIFO can take
 * another block.
 * @blocks: blocks sent so far; used only to report a byte count on error.
 *
 * The device exposes an 8-bit credit counter; the difference (mod 256)
 * between it and the low byte of conf->sent_packets is the available
 * space, so the unsigned wraparound of the subtraction below appears
 * intentional.
 *
 * Return: 0 when a credit is available, -EAGAIN if the device flags a
 * config error, -ETIMEDOUT after V2_CREDIT_TIMEOUT_US, or a negative
 * errno on a config-space read failure.
 */
static int altera_cvp_v2_wait_for_credit(struct fpga_manager *mgr,
					 u32 blocks)
{
	u32 timeout = V2_CREDIT_TIMEOUT_US / V2_CHECK_CREDIT_US;
	struct altera_cvp_conf *conf = mgr->priv;
	int ret;
	u8 val;

	do {
		ret = altera_read_config_byte(conf, VSE_CVP_TX_CREDITS, &val);
		if (ret) {
			dev_err(&conf->pci_dev->dev,
				"Error reading CVP Credit Register\n");
			return ret;
		}

		/* Return if there is space in FIFO */
		if (val - (u8)conf->sent_packets)
			return 0;

		/* no credit yet - see whether the device reported an error */
		ret = altera_cvp_chk_error(mgr, blocks * ALTERA_CVP_V2_SIZE);
		if (ret) {
			dev_err(&conf->pci_dev->dev,
				"CE Bit error credit reg[0x%x]:sent[0x%x]\n",
				val, conf->sent_packets);
			return -EAGAIN;
		}

		/* Limit the check credit byte traffic */
		usleep_range(V2_CHECK_CREDIT_US, V2_CHECK_CREDIT_US + 1);
	} while (timeout--);

	dev_err(&conf->pci_dev->dev, "Timeout waiting for credit\n");
	return -ETIMEDOUT;
}
264*4882a593Smuzhiyun 
/*
 * altera_cvp_send_block() - push one block of bitstream data to the device.
 * @conf: driver state
 * @data: bitstream data, consumed as 32-bit words
 * @len:  block length in bytes; may be a non-multiple of 4 for the final
 *        block of an image
 *
 * Writes @len bytes word-by-word via conf->write_data(). Up to three
 * trailing bytes are masked and written as one final word.
 *
 * Return: always 0.
 */
static int altera_cvp_send_block(struct altera_cvp_conf *conf,
				 const u32 *data, size_t len)
{
	/* size_t index avoids the signed/unsigned comparison of "int i" */
	size_t i, words = len / sizeof(u32);
	size_t remainder;

	for (i = 0; i < words; i++)
		conf->write_data(conf, *data++);

	/* write up to 3 trailing bytes, if any */
	remainder = len % sizeof(u32);
	if (remainder) {
		/* remainder is 1..3, so the byte mask is never zero */
		u32 mask = BIT(remainder * 8) - 1;

		conf->write_data(conf, *data & mask);
	}

	return 0;
}
284*4882a593Smuzhiyun 
/*
 * altera_cvp_teardown() - leave CvP transfer mode (STEPs 12-15 of the
 * CvP programming flow).
 *
 * Clears START_XFER and CVP_CONFIG, issues the clock-switch dummy writes,
 * and waits for the controller to deassert CVP_CONFIG_READY.
 *
 * Return: 0 on success, -ETIMEDOUT if CFG_RDY does not clear in time.
 */
static int altera_cvp_teardown(struct fpga_manager *mgr,
			       struct fpga_image_info *info)
{
	struct altera_cvp_conf *conf = mgr->priv;
	int ret;
	u32 val;

	/* STEP 12 - reset START_XFER bit */
	altera_read_config_dword(conf, VSE_CVP_PROG_CTRL, &val);
	val &= ~VSE_CVP_PROG_CTRL_START_XFER;
	altera_write_config_dword(conf, VSE_CVP_PROG_CTRL, val);

	/* STEP 13 - reset CVP_CONFIG bit (reuses the value read above) */
	val &= ~VSE_CVP_PROG_CTRL_CONFIG;
	altera_write_config_dword(conf, VSE_CVP_PROG_CTRL, val);

	/*
	 * STEP 14
	 * - set CVP_NUMCLKS to 1 and then issue CVP_DUMMY_WR dummy
	 *   writes to the HIP
	 */
	if (conf->priv->switch_clk)
		conf->priv->switch_clk(conf);

	/* STEP 15 - poll CVP_CONFIG_READY bit for 0 with 10us timeout */
	ret = altera_cvp_wait_status(conf, VSE_CVP_STATUS_CFG_RDY, 0,
				     conf->priv->poll_time_us);
	if (ret)
		dev_err(&mgr->dev, "CFG_RDY == 0 timeout\n");

	return ret;
}
317*4882a593Smuzhiyun 
/*
 * altera_cvp_write_init() - prepare the device for CvP configuration
 * (STEPs 1-8 of the CvP programming flow).
 * @info:  image info; flags select the clock-to-data ratio, partial
 *         reconfiguration is rejected
 * @buf/@count: unused here; data is streamed in altera_cvp_write()
 *
 * Return: 0 on success, -EINVAL for partial reconfig requests, -ENODEV
 * when CvP is disabled, or a negative errno from the sub-steps.
 *
 * Fix vs. original: the STEP 6 failure path printed a copy-pasted
 * "CFG_RDY == 1 timeout" message although the failing check there is
 * altera_cvp_chk_error() (the CVP_CONFIG_ERROR flag), not a CFG_RDY
 * timeout; the diagnostic now names the actual condition.
 */
static int altera_cvp_write_init(struct fpga_manager *mgr,
				 struct fpga_image_info *info,
				 const char *buf, size_t count)
{
	struct altera_cvp_conf *conf = mgr->priv;
	u32 iflags, val;
	int ret;

	iflags = info ? info->flags : 0;

	if (iflags & FPGA_MGR_PARTIAL_RECONFIG) {
		dev_err(&mgr->dev, "Partial reconfiguration not supported.\n");
		return -EINVAL;
	}

	/* Determine allowed clock to data ratio */
	if (iflags & FPGA_MGR_COMPRESSED_BITSTREAM)
		conf->numclks = 8; /* ratio for all compressed images */
	else if (iflags & FPGA_MGR_ENCRYPTED_BITSTREAM)
		conf->numclks = 4; /* for uncompressed and encrypted images */
	else
		conf->numclks = 1; /* for uncompressed and unencrypted images */

	/* STEP 1 - read CVP status and check CVP_EN flag */
	altera_read_config_dword(conf, VSE_CVP_STATUS, &val);
	if (!(val & VSE_CVP_STATUS_CVP_EN)) {
		dev_err(&mgr->dev, "CVP mode off: 0x%04x\n", val);
		return -ENODEV;
	}

	if (val & VSE_CVP_STATUS_CFG_RDY) {
		dev_warn(&mgr->dev, "CvP already started, teardown first\n");
		ret = altera_cvp_teardown(mgr, info);
		if (ret)
			return ret;
	}

	/*
	 * STEP 2
	 * - set HIP_CLK_SEL and CVP_MODE (must be set in the order mentioned)
	 */
	/* switch from fabric to PMA clock */
	altera_read_config_dword(conf, VSE_CVP_MODE_CTRL, &val);
	val |= VSE_CVP_MODE_CTRL_HIP_CLK_SEL;
	altera_write_config_dword(conf, VSE_CVP_MODE_CTRL, val);

	/* set CVP mode */
	altera_read_config_dword(conf, VSE_CVP_MODE_CTRL, &val);
	val |= VSE_CVP_MODE_CTRL_CVP_MODE;
	altera_write_config_dword(conf, VSE_CVP_MODE_CTRL, val);

	/*
	 * STEP 3
	 * - set CVP_NUMCLKS to 1 and issue CVP_DUMMY_WR dummy writes to the HIP
	 */
	if (conf->priv->switch_clk)
		conf->priv->switch_clk(conf);

	/* V2 devices additionally reset the programming state machine */
	if (conf->priv->clear_state) {
		ret = conf->priv->clear_state(conf);
		if (ret) {
			dev_err(&mgr->dev, "Problem clearing out state\n");
			return ret;
		}
	}

	conf->sent_packets = 0;

	/* STEP 4 - set CVP_CONFIG bit */
	altera_read_config_dword(conf, VSE_CVP_PROG_CTRL, &val);
	/* request control block to begin transfer using CVP */
	val |= VSE_CVP_PROG_CTRL_CONFIG;
	altera_write_config_dword(conf, VSE_CVP_PROG_CTRL, val);

	/* STEP 5 - poll CVP_CONFIG READY for 1 with timeout */
	ret = altera_cvp_wait_status(conf, VSE_CVP_STATUS_CFG_RDY,
				     VSE_CVP_STATUS_CFG_RDY,
				     conf->priv->poll_time_us);
	if (ret) {
		dev_warn(&mgr->dev, "CFG_RDY == 1 timeout\n");
		return ret;
	}

	/*
	 * STEP 6
	 * - set CVP_NUMCLKS to 1 and issue CVP_DUMMY_WR dummy writes to the HIP
	 */
	if (conf->priv->switch_clk)
		conf->priv->switch_clk(conf);

	if (altera_cvp_chkcfg) {
		ret = altera_cvp_chk_error(mgr, 0);
		if (ret) {
			dev_warn(&mgr->dev, "CVP_CONFIG_ERROR flag set\n");
			return ret;
		}
	}

	/* STEP 7 - set START_XFER */
	altera_read_config_dword(conf, VSE_CVP_PROG_CTRL, &val);
	val |= VSE_CVP_PROG_CTRL_START_XFER;
	altera_write_config_dword(conf, VSE_CVP_PROG_CTRL, val);

	/* STEP 8 - start transfer (set CVP_NUMCLKS for bitstream) */
	if (conf->priv->switch_clk) {
		altera_read_config_dword(conf, VSE_CVP_MODE_CTRL, &val);
		val &= ~VSE_CVP_MODE_CTRL_NUMCLKS_MASK;
		val |= conf->numclks << VSE_CVP_MODE_CTRL_NUMCLKS_OFF;
		altera_write_config_dword(conf, VSE_CVP_MODE_CTRL, val);
	}
	return 0;
}
430*4882a593Smuzhiyun 
/*
 * altera_cvp_write() - stream the bitstream to the device (STEPs 9-11).
 *
 * Sends the image in priv->block_size chunks, throttled by the V2 credit
 * mechanism when available. When the "chkcfg" attribute is set, the
 * device error flag is checked after every 4 KiB and once at the end.
 *
 * Return: 0 on success, or a negative errno from credit waiting or the
 * config error check.
 */
static int altera_cvp_write(struct fpga_manager *mgr, const char *buf,
			    size_t count)
{
	struct altera_cvp_conf *conf = mgr->priv;
	size_t done, remaining, len;
	const u32 *data;
	int status = 0;

	/* STEP 9 - write 32-bit data from RBF file to CVP data register */
	data = (u32 *)buf;
	remaining = count;
	done = 0;

	while (remaining) {
		/* Use credit throttling if available */
		if (conf->priv->wait_credit) {
			status = conf->priv->wait_credit(mgr, done);
			if (status) {
				dev_err(&conf->pci_dev->dev,
					"Wait Credit ERR: 0x%x\n", status);
				return status;
			}
		}

		/* final chunk may be shorter than block_size */
		len = min(conf->priv->block_size, remaining);
		altera_cvp_send_block(conf, data, len);
		data += len / sizeof(u32);
		done += len;
		remaining -= len;
		conf->sent_packets++;

		/*
		 * STEP 10 (optional) and STEP 11
		 * - check error flag
		 * - loop until data transfer completed
		 * Config images can be huge (more than 40 MiB), so
		 * only check after a new 4k data block has been written.
		 * This reduces the number of checks and speeds up the
		 * configuration process.
		 */
		if (altera_cvp_chkcfg && !(done % SZ_4K)) {
			status = altera_cvp_chk_error(mgr, done);
			if (status < 0)
				return status;
		}
	}

	/* final check covers any tail not aligned to 4 KiB */
	if (altera_cvp_chkcfg)
		status = altera_cvp_chk_error(mgr, count);

	return status;
}
483*4882a593Smuzhiyun 
/*
 * altera_cvp_write_complete() - finish configuration (STEPs 12-18).
 *
 * Tears down the transfer, checks the latched error bit, switches the
 * device back to normal mode, and waits for user mode.
 *
 * Return: 0 on success, -EPROTO on a latched config error, -ETIMEDOUT
 * if the device does not reach user mode in time.
 */
static int altera_cvp_write_complete(struct fpga_manager *mgr,
				     struct fpga_image_info *info)
{
	struct altera_cvp_conf *conf = mgr->priv;
	u32 mask, val;
	int ret;

	/* STEPs 12-15 - leave CvP transfer mode */
	ret = altera_cvp_teardown(mgr, info);
	if (ret)
		return ret;

	/* STEP 16 - check CVP_CONFIG_ERROR_LATCHED bit */
	altera_read_config_dword(conf, VSE_UNCOR_ERR_STATUS, &val);
	if (val & VSE_UNCOR_ERR_CVP_CFG_ERR) {
		dev_err(&mgr->dev, "detected CVP_CONFIG_ERROR_LATCHED!\n");
		return -EPROTO;
	}

	/* STEP 17 - reset CVP_MODE and HIP_CLK_SEL bit */
	altera_read_config_dword(conf, VSE_CVP_MODE_CTRL, &val);
	val &= ~VSE_CVP_MODE_CTRL_HIP_CLK_SEL;
	val &= ~VSE_CVP_MODE_CTRL_CVP_MODE;
	altera_write_config_dword(conf, VSE_CVP_MODE_CTRL, val);

	/* STEP 18 - poll PLD_CLK_IN_USE and USER_MODE bits */
	mask = VSE_CVP_STATUS_PLD_CLK_IN_USE | VSE_CVP_STATUS_USERMODE;
	ret = altera_cvp_wait_status(conf, mask, mask,
				     conf->priv->user_time_us);
	if (ret)
		dev_err(&mgr->dev, "PLD_CLK_IN_USE|USERMODE timeout\n");

	return ret;
}
517*4882a593Smuzhiyun 
/* FPGA manager callbacks implementing the CvP programming sequence */
static const struct fpga_manager_ops altera_cvp_ops = {
	.state		= altera_cvp_state,
	.write_init	= altera_cvp_write_init,
	.write		= altera_cvp_write,
	.write_complete	= altera_cvp_write_complete,
};
524*4882a593Smuzhiyun 
/* CvP V1: 4-byte writes, clock-switch dummy writes, no credit throttling */
static const struct cvp_priv cvp_priv_v1 = {
	.switch_clk	= altera_cvp_dummy_write,
	.block_size	= ALTERA_CVP_V1_SIZE,
	.poll_time_us	= V1_POLL_TIMEOUT_US,
	.user_time_us	= TIMEOUT_US,
};
531*4882a593Smuzhiyun 
/* CvP V2: 4 KiB blocks, state-machine reset and credit-based throttling */
static const struct cvp_priv cvp_priv_v2 = {
	.clear_state	= altera_cvp_v2_clear_state,
	.wait_credit	= altera_cvp_v2_wait_for_credit,
	.block_size	= ALTERA_CVP_V2_SIZE,
	.poll_time_us	= V2_POLL_TIMEOUT_US,
	.user_time_us	= V2_USER_TIMEOUT_US,
};
539*4882a593Smuzhiyun 
chkcfg_show(struct device_driver * dev,char * buf)540*4882a593Smuzhiyun static ssize_t chkcfg_show(struct device_driver *dev, char *buf)
541*4882a593Smuzhiyun {
542*4882a593Smuzhiyun 	return snprintf(buf, 3, "%d\n", altera_cvp_chkcfg);
543*4882a593Smuzhiyun }
544*4882a593Smuzhiyun 
chkcfg_store(struct device_driver * drv,const char * buf,size_t count)545*4882a593Smuzhiyun static ssize_t chkcfg_store(struct device_driver *drv, const char *buf,
546*4882a593Smuzhiyun 			    size_t count)
547*4882a593Smuzhiyun {
548*4882a593Smuzhiyun 	int ret;
549*4882a593Smuzhiyun 
550*4882a593Smuzhiyun 	ret = kstrtobool(buf, &altera_cvp_chkcfg);
551*4882a593Smuzhiyun 	if (ret)
552*4882a593Smuzhiyun 		return ret;
553*4882a593Smuzhiyun 
554*4882a593Smuzhiyun 	return count;
555*4882a593Smuzhiyun }
556*4882a593Smuzhiyun 
/* expose chkcfg_show()/chkcfg_store() as a driver sysfs attribute */
static DRIVER_ATTR_RW(chkcfg);

/* forward declarations needed by the pci_driver table below */
static int altera_cvp_probe(struct pci_dev *pdev,
			    const struct pci_device_id *dev_id);
static void altera_cvp_remove(struct pci_dev *pdev);
562*4882a593Smuzhiyun 
/* match any Altera PCI device; probe() verifies the CvP capability itself */
static struct pci_device_id altera_cvp_id_tbl[] = {
	{ PCI_VDEVICE(ALTERA, PCI_ANY_ID) },
	{ }
};
MODULE_DEVICE_TABLE(pci, altera_cvp_id_tbl);
568*4882a593Smuzhiyun 
/* PCI driver glue binding the CvP FPGA manager to matching devices */
static struct pci_driver altera_cvp_driver = {
	.name   = DRV_NAME,
	.id_table = altera_cvp_id_tbl,
	.probe  = altera_cvp_probe,
	.remove = altera_cvp_remove,
};
575*4882a593Smuzhiyun 
altera_cvp_probe(struct pci_dev * pdev,const struct pci_device_id * dev_id)576*4882a593Smuzhiyun static int altera_cvp_probe(struct pci_dev *pdev,
577*4882a593Smuzhiyun 			    const struct pci_device_id *dev_id)
578*4882a593Smuzhiyun {
579*4882a593Smuzhiyun 	struct altera_cvp_conf *conf;
580*4882a593Smuzhiyun 	struct fpga_manager *mgr;
581*4882a593Smuzhiyun 	int ret, offset;
582*4882a593Smuzhiyun 	u16 cmd, val;
583*4882a593Smuzhiyun 	u32 regval;
584*4882a593Smuzhiyun 
585*4882a593Smuzhiyun 	/* Discover the Vendor Specific Offset for this device */
586*4882a593Smuzhiyun 	offset = pci_find_next_ext_capability(pdev, 0, PCI_EXT_CAP_ID_VNDR);
587*4882a593Smuzhiyun 	if (!offset) {
588*4882a593Smuzhiyun 		dev_err(&pdev->dev, "No Vendor Specific Offset.\n");
589*4882a593Smuzhiyun 		return -ENODEV;
590*4882a593Smuzhiyun 	}
591*4882a593Smuzhiyun 
592*4882a593Smuzhiyun 	/*
593*4882a593Smuzhiyun 	 * First check if this is the expected FPGA device. PCI config
594*4882a593Smuzhiyun 	 * space access works without enabling the PCI device, memory
595*4882a593Smuzhiyun 	 * space access is enabled further down.
596*4882a593Smuzhiyun 	 */
597*4882a593Smuzhiyun 	pci_read_config_word(pdev, offset + VSE_PCIE_EXT_CAP_ID, &val);
598*4882a593Smuzhiyun 	if (val != VSE_PCIE_EXT_CAP_ID_VAL) {
599*4882a593Smuzhiyun 		dev_err(&pdev->dev, "Wrong EXT_CAP_ID value 0x%x\n", val);
600*4882a593Smuzhiyun 		return -ENODEV;
601*4882a593Smuzhiyun 	}
602*4882a593Smuzhiyun 
603*4882a593Smuzhiyun 	pci_read_config_dword(pdev, offset + VSE_CVP_STATUS, &regval);
604*4882a593Smuzhiyun 	if (!(regval & VSE_CVP_STATUS_CVP_EN)) {
605*4882a593Smuzhiyun 		dev_err(&pdev->dev,
606*4882a593Smuzhiyun 			"CVP is disabled for this device: CVP_STATUS Reg 0x%x\n",
607*4882a593Smuzhiyun 			regval);
608*4882a593Smuzhiyun 		return -ENODEV;
609*4882a593Smuzhiyun 	}
610*4882a593Smuzhiyun 
611*4882a593Smuzhiyun 	conf = devm_kzalloc(&pdev->dev, sizeof(*conf), GFP_KERNEL);
612*4882a593Smuzhiyun 	if (!conf)
613*4882a593Smuzhiyun 		return -ENOMEM;
614*4882a593Smuzhiyun 
615*4882a593Smuzhiyun 	conf->vsec_offset = offset;
616*4882a593Smuzhiyun 
617*4882a593Smuzhiyun 	/*
618*4882a593Smuzhiyun 	 * Enable memory BAR access. We cannot use pci_enable_device() here
619*4882a593Smuzhiyun 	 * because it will make the driver unusable with FPGA devices that
620*4882a593Smuzhiyun 	 * have additional big IOMEM resources (e.g. 4GiB BARs) on 32-bit
621*4882a593Smuzhiyun 	 * platform. Such BARs will not have an assigned address range and
622*4882a593Smuzhiyun 	 * pci_enable_device() will fail, complaining about not claimed BAR,
623*4882a593Smuzhiyun 	 * even if the concerned BAR is not needed for FPGA configuration
624*4882a593Smuzhiyun 	 * at all. Thus, enable the device via PCI config space command.
625*4882a593Smuzhiyun 	 */
626*4882a593Smuzhiyun 	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
627*4882a593Smuzhiyun 	if (!(cmd & PCI_COMMAND_MEMORY)) {
628*4882a593Smuzhiyun 		cmd |= PCI_COMMAND_MEMORY;
629*4882a593Smuzhiyun 		pci_write_config_word(pdev, PCI_COMMAND, cmd);
630*4882a593Smuzhiyun 	}
631*4882a593Smuzhiyun 
632*4882a593Smuzhiyun 	ret = pci_request_region(pdev, CVP_BAR, "CVP");
633*4882a593Smuzhiyun 	if (ret) {
634*4882a593Smuzhiyun 		dev_err(&pdev->dev, "Requesting CVP BAR region failed\n");
635*4882a593Smuzhiyun 		goto err_disable;
636*4882a593Smuzhiyun 	}
637*4882a593Smuzhiyun 
638*4882a593Smuzhiyun 	conf->pci_dev = pdev;
639*4882a593Smuzhiyun 	conf->write_data = altera_cvp_write_data_iomem;
640*4882a593Smuzhiyun 
641*4882a593Smuzhiyun 	if (conf->vsec_offset == V1_VSEC_OFFSET)
642*4882a593Smuzhiyun 		conf->priv = &cvp_priv_v1;
643*4882a593Smuzhiyun 	else
644*4882a593Smuzhiyun 		conf->priv = &cvp_priv_v2;
645*4882a593Smuzhiyun 
646*4882a593Smuzhiyun 	conf->map = pci_iomap(pdev, CVP_BAR, 0);
647*4882a593Smuzhiyun 	if (!conf->map) {
648*4882a593Smuzhiyun 		dev_warn(&pdev->dev, "Mapping CVP BAR failed\n");
649*4882a593Smuzhiyun 		conf->write_data = altera_cvp_write_data_config;
650*4882a593Smuzhiyun 	}
651*4882a593Smuzhiyun 
652*4882a593Smuzhiyun 	snprintf(conf->mgr_name, sizeof(conf->mgr_name), "%s @%s",
653*4882a593Smuzhiyun 		 ALTERA_CVP_MGR_NAME, pci_name(pdev));
654*4882a593Smuzhiyun 
655*4882a593Smuzhiyun 	mgr = devm_fpga_mgr_create(&pdev->dev, conf->mgr_name,
656*4882a593Smuzhiyun 				   &altera_cvp_ops, conf);
657*4882a593Smuzhiyun 	if (!mgr) {
658*4882a593Smuzhiyun 		ret = -ENOMEM;
659*4882a593Smuzhiyun 		goto err_unmap;
660*4882a593Smuzhiyun 	}
661*4882a593Smuzhiyun 
662*4882a593Smuzhiyun 	pci_set_drvdata(pdev, mgr);
663*4882a593Smuzhiyun 
664*4882a593Smuzhiyun 	ret = fpga_mgr_register(mgr);
665*4882a593Smuzhiyun 	if (ret)
666*4882a593Smuzhiyun 		goto err_unmap;
667*4882a593Smuzhiyun 
668*4882a593Smuzhiyun 	return 0;
669*4882a593Smuzhiyun 
670*4882a593Smuzhiyun err_unmap:
671*4882a593Smuzhiyun 	if (conf->map)
672*4882a593Smuzhiyun 		pci_iounmap(pdev, conf->map);
673*4882a593Smuzhiyun 	pci_release_region(pdev, CVP_BAR);
674*4882a593Smuzhiyun err_disable:
675*4882a593Smuzhiyun 	cmd &= ~PCI_COMMAND_MEMORY;
676*4882a593Smuzhiyun 	pci_write_config_word(pdev, PCI_COMMAND, cmd);
677*4882a593Smuzhiyun 	return ret;
678*4882a593Smuzhiyun }
679*4882a593Smuzhiyun 
altera_cvp_remove(struct pci_dev * pdev)680*4882a593Smuzhiyun static void altera_cvp_remove(struct pci_dev *pdev)
681*4882a593Smuzhiyun {
682*4882a593Smuzhiyun 	struct fpga_manager *mgr = pci_get_drvdata(pdev);
683*4882a593Smuzhiyun 	struct altera_cvp_conf *conf = mgr->priv;
684*4882a593Smuzhiyun 	u16 cmd;
685*4882a593Smuzhiyun 
686*4882a593Smuzhiyun 	fpga_mgr_unregister(mgr);
687*4882a593Smuzhiyun 	if (conf->map)
688*4882a593Smuzhiyun 		pci_iounmap(pdev, conf->map);
689*4882a593Smuzhiyun 	pci_release_region(pdev, CVP_BAR);
690*4882a593Smuzhiyun 	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
691*4882a593Smuzhiyun 	cmd &= ~PCI_COMMAND_MEMORY;
692*4882a593Smuzhiyun 	pci_write_config_word(pdev, PCI_COMMAND, cmd);
693*4882a593Smuzhiyun }
694*4882a593Smuzhiyun 
altera_cvp_init(void)695*4882a593Smuzhiyun static int __init altera_cvp_init(void)
696*4882a593Smuzhiyun {
697*4882a593Smuzhiyun 	int ret;
698*4882a593Smuzhiyun 
699*4882a593Smuzhiyun 	ret = pci_register_driver(&altera_cvp_driver);
700*4882a593Smuzhiyun 	if (ret)
701*4882a593Smuzhiyun 		return ret;
702*4882a593Smuzhiyun 
703*4882a593Smuzhiyun 	ret = driver_create_file(&altera_cvp_driver.driver,
704*4882a593Smuzhiyun 				 &driver_attr_chkcfg);
705*4882a593Smuzhiyun 	if (ret)
706*4882a593Smuzhiyun 		pr_warn("Can't create sysfs chkcfg file\n");
707*4882a593Smuzhiyun 
708*4882a593Smuzhiyun 	return 0;
709*4882a593Smuzhiyun }
710*4882a593Smuzhiyun 
/*
 * altera_cvp_exit() - module exit point.
 *
 * Removes the driver-wide "chkcfg" sysfs attribute created in
 * altera_cvp_init() before unregistering the PCI driver, mirroring
 * init in reverse order.
 */
static void __exit altera_cvp_exit(void)
{
	driver_remove_file(&altera_cvp_driver.driver, &driver_attr_chkcfg);
	pci_unregister_driver(&altera_cvp_driver);
}
716*4882a593Smuzhiyun 
/* Wire up the module entry/exit points defined above. */
module_init(altera_cvp_init);
module_exit(altera_cvp_exit);

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Anatolij Gustschin <agust@denx.de>");
MODULE_DESCRIPTION("Module to load Altera FPGA over CvP");
723