1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * Copyright (c) 2011-2015 Xilinx Inc.
4*4882a593Smuzhiyun * Copyright (c) 2015, National Instruments Corp.
5*4882a593Smuzhiyun *
6*4882a593Smuzhiyun * FPGA Manager Driver for Xilinx Zynq, heavily based on xdevcfg driver
7*4882a593Smuzhiyun * in their vendor tree.
8*4882a593Smuzhiyun */
9*4882a593Smuzhiyun
10*4882a593Smuzhiyun #include <linux/clk.h>
11*4882a593Smuzhiyun #include <linux/completion.h>
12*4882a593Smuzhiyun #include <linux/delay.h>
13*4882a593Smuzhiyun #include <linux/dma-mapping.h>
14*4882a593Smuzhiyun #include <linux/fpga/fpga-mgr.h>
15*4882a593Smuzhiyun #include <linux/interrupt.h>
16*4882a593Smuzhiyun #include <linux/io.h>
17*4882a593Smuzhiyun #include <linux/iopoll.h>
18*4882a593Smuzhiyun #include <linux/module.h>
19*4882a593Smuzhiyun #include <linux/mfd/syscon.h>
20*4882a593Smuzhiyun #include <linux/of_address.h>
21*4882a593Smuzhiyun #include <linux/of_irq.h>
22*4882a593Smuzhiyun #include <linux/pm.h>
23*4882a593Smuzhiyun #include <linux/regmap.h>
24*4882a593Smuzhiyun #include <linux/string.h>
25*4882a593Smuzhiyun #include <linux/scatterlist.h>
26*4882a593Smuzhiyun
27*4882a593Smuzhiyun /* Offsets into SLCR regmap */
28*4882a593Smuzhiyun
29*4882a593Smuzhiyun /* FPGA Software Reset Control */
30*4882a593Smuzhiyun #define SLCR_FPGA_RST_CTRL_OFFSET 0x240
31*4882a593Smuzhiyun /* Level Shifters Enable */
32*4882a593Smuzhiyun #define SLCR_LVL_SHFTR_EN_OFFSET 0x900
33*4882a593Smuzhiyun
34*4882a593Smuzhiyun /* Constant Definitions */
35*4882a593Smuzhiyun
36*4882a593Smuzhiyun /* Control Register */
37*4882a593Smuzhiyun #define CTRL_OFFSET 0x00
38*4882a593Smuzhiyun /* Lock Register */
39*4882a593Smuzhiyun #define LOCK_OFFSET 0x04
40*4882a593Smuzhiyun /* Interrupt Status Register */
41*4882a593Smuzhiyun #define INT_STS_OFFSET 0x0c
42*4882a593Smuzhiyun /* Interrupt Mask Register */
43*4882a593Smuzhiyun #define INT_MASK_OFFSET 0x10
44*4882a593Smuzhiyun /* Status Register */
45*4882a593Smuzhiyun #define STATUS_OFFSET 0x14
46*4882a593Smuzhiyun /* DMA Source Address Register */
47*4882a593Smuzhiyun #define DMA_SRC_ADDR_OFFSET 0x18
48*4882a593Smuzhiyun /* DMA Destination Address Reg */
49*4882a593Smuzhiyun #define DMA_DST_ADDR_OFFSET 0x1c
50*4882a593Smuzhiyun /* DMA Source Transfer Length */
51*4882a593Smuzhiyun #define DMA_SRC_LEN_OFFSET 0x20
52*4882a593Smuzhiyun /* DMA Destination Transfer */
53*4882a593Smuzhiyun #define DMA_DEST_LEN_OFFSET 0x24
54*4882a593Smuzhiyun /* Unlock Register */
55*4882a593Smuzhiyun #define UNLOCK_OFFSET 0x34
56*4882a593Smuzhiyun /* Misc. Control Register */
57*4882a593Smuzhiyun #define MCTRL_OFFSET 0x80
58*4882a593Smuzhiyun
59*4882a593Smuzhiyun /* Control Register Bit definitions */
60*4882a593Smuzhiyun
61*4882a593Smuzhiyun /* Signal to reset FPGA */
62*4882a593Smuzhiyun #define CTRL_PCFG_PROG_B_MASK BIT(30)
63*4882a593Smuzhiyun /* Enable PCAP for PR */
64*4882a593Smuzhiyun #define CTRL_PCAP_PR_MASK BIT(27)
65*4882a593Smuzhiyun /* Enable PCAP */
66*4882a593Smuzhiyun #define CTRL_PCAP_MODE_MASK BIT(26)
67*4882a593Smuzhiyun /* Lower rate to allow decrypt on the fly */
68*4882a593Smuzhiyun #define CTRL_PCAP_RATE_EN_MASK BIT(25)
69*4882a593Smuzhiyun /* System booted in secure mode */
70*4882a593Smuzhiyun #define CTRL_SEC_EN_MASK BIT(7)
71*4882a593Smuzhiyun
72*4882a593Smuzhiyun /* Miscellaneous Control Register bit definitions */
73*4882a593Smuzhiyun /* Internal PCAP loopback */
74*4882a593Smuzhiyun #define MCTRL_PCAP_LPBK_MASK BIT(4)
75*4882a593Smuzhiyun
76*4882a593Smuzhiyun /* Status register bit definitions */
77*4882a593Smuzhiyun
78*4882a593Smuzhiyun /* FPGA init status */
79*4882a593Smuzhiyun #define STATUS_DMA_Q_F BIT(31)
80*4882a593Smuzhiyun #define STATUS_DMA_Q_E BIT(30)
81*4882a593Smuzhiyun #define STATUS_PCFG_INIT_MASK BIT(4)
82*4882a593Smuzhiyun
83*4882a593Smuzhiyun /* Interrupt Status/Mask Register Bit definitions */
84*4882a593Smuzhiyun /* DMA command done */
85*4882a593Smuzhiyun #define IXR_DMA_DONE_MASK BIT(13)
86*4882a593Smuzhiyun /* DMA and PCAP cmd done */
87*4882a593Smuzhiyun #define IXR_D_P_DONE_MASK BIT(12)
88*4882a593Smuzhiyun /* FPGA programmed */
89*4882a593Smuzhiyun #define IXR_PCFG_DONE_MASK BIT(2)
90*4882a593Smuzhiyun #define IXR_ERROR_FLAGS_MASK 0x00F0C860
91*4882a593Smuzhiyun #define IXR_ALL_MASK 0xF8F7F87F
92*4882a593Smuzhiyun
93*4882a593Smuzhiyun /* Miscellaneous constant values */
94*4882a593Smuzhiyun
95*4882a593Smuzhiyun /* Invalid DMA addr */
96*4882a593Smuzhiyun #define DMA_INVALID_ADDRESS GENMASK(31, 0)
97*4882a593Smuzhiyun /* Used to unlock the dev */
98*4882a593Smuzhiyun #define UNLOCK_MASK 0x757bdf0d
99*4882a593Smuzhiyun /* Timeout for polling reset bits */
100*4882a593Smuzhiyun #define INIT_POLL_TIMEOUT 2500000
101*4882a593Smuzhiyun /* Delay for polling reset bits */
102*4882a593Smuzhiyun #define INIT_POLL_DELAY 20
103*4882a593Smuzhiyun /* Signal this is the last DMA transfer, wait for the AXI and PCAP before
104*4882a593Smuzhiyun * interrupting
105*4882a593Smuzhiyun */
106*4882a593Smuzhiyun #define DMA_SRC_LAST_TRANSFER 1
107*4882a593Smuzhiyun /* Timeout for DMA completion */
108*4882a593Smuzhiyun #define DMA_TIMEOUT_MS 5000
109*4882a593Smuzhiyun
110*4882a593Smuzhiyun /* Masks for controlling stuff in SLCR */
111*4882a593Smuzhiyun /* Disable all Level shifters */
112*4882a593Smuzhiyun #define LVL_SHFTR_DISABLE_ALL_MASK 0x0
113*4882a593Smuzhiyun /* Enable Level shifters from PS to PL */
114*4882a593Smuzhiyun #define LVL_SHFTR_ENABLE_PS_TO_PL 0xa
115*4882a593Smuzhiyun /* Enable Level shifters from PL to PS */
116*4882a593Smuzhiyun #define LVL_SHFTR_ENABLE_PL_TO_PS 0xf
117*4882a593Smuzhiyun /* Enable global resets */
118*4882a593Smuzhiyun #define FPGA_RST_ALL_MASK 0xf
119*4882a593Smuzhiyun /* Disable global resets */
120*4882a593Smuzhiyun #define FPGA_RST_NONE_MASK 0x0
121*4882a593Smuzhiyun
/* Per-device state for one devcfg (PCAP) instance. */
struct zynq_fpga_priv {
	int irq;		/* devcfg interrupt line */
	struct clk *clk;	/* "ref_clk"; kept prepared while bound,
				 * enabled only around register access */

	void __iomem *io_base;	/* devcfg register window */
	struct regmap *slcr;	/* SLCR syscon (PL resets, level shifters) */

	/* dma_lock protects the scatterlist walk state below, which is
	 * handed back and forth between zynq_fpga_ops_write and the ISR.
	 */
	spinlock_t dma_lock;
	unsigned int dma_elm;	/* number of elements already queued */
	unsigned int dma_nelms;	/* total elements returned by dma_map_sg */
	struct scatterlist *cur_sg;	/* next element to queue; NULL once
					 * the last one has been submitted */

	struct completion dma_done;	/* signalled by ISR on error or when
					 * DMA & PCAP report done */
};
136*4882a593Smuzhiyun
/* Write @val to the devcfg register at byte offset @offset */
static inline void zynq_fpga_write(struct zynq_fpga_priv *priv, u32 offset,
				   u32 val)
{
	void __iomem *reg = priv->io_base + offset;

	writel(val, reg);
}
142*4882a593Smuzhiyun
/* Read the devcfg register at byte offset @offset */
static inline u32 zynq_fpga_read(const struct zynq_fpga_priv *priv,
				 u32 offset)
{
	void __iomem *reg = priv->io_base + offset;

	return readl(reg);
}
148*4882a593Smuzhiyun
/* Busy-poll the devcfg register at @addr into @val until @cond holds,
 * sleeping @sleep_us between reads, giving up after @timeout_us.
 */
#define zynq_fpga_poll_timeout(priv, addr, val, cond, sleep_us, timeout_us) \
	readl_poll_timeout(priv->io_base + addr, val, cond, sleep_us, \
			   timeout_us)
152*4882a593Smuzhiyun
153*4882a593Smuzhiyun /* Cause the specified irq mask bits to generate IRQs */
zynq_fpga_set_irq(struct zynq_fpga_priv * priv,u32 enable)154*4882a593Smuzhiyun static inline void zynq_fpga_set_irq(struct zynq_fpga_priv *priv, u32 enable)
155*4882a593Smuzhiyun {
156*4882a593Smuzhiyun zynq_fpga_write(priv, INT_MASK_OFFSET, ~enable);
157*4882a593Smuzhiyun }
158*4882a593Smuzhiyun
/* Queue as many scatterlist elements into the devcfg DMA command queue as
 * it will accept, then arm the interrupt mask for the phase we are in
 * (mid-stream vs. final element).
 *
 * Must be called with dma_lock held.
 */
static void zynq_step_dma(struct zynq_fpga_priv *priv)
{
	u32 addr;
	u32 len;
	bool first;

	/* dma_elm == 0 means nothing has been queued yet for this image */
	first = priv->dma_elm == 0;
	while (priv->cur_sg) {
		/* Feed the DMA queue until it is full. */
		if (zynq_fpga_read(priv, STATUS_OFFSET) & STATUS_DMA_Q_F)
			break;

		addr = sg_dma_address(priv->cur_sg);
		len = sg_dma_len(priv->cur_sg);
		if (priv->dma_elm + 1 == priv->dma_nelms) {
			/* The last transfer waits for the PCAP to finish too,
			 * notice this also changes the irq_mask to ignore
			 * IXR_DMA_DONE_MASK which ensures we do not trigger
			 * the completion too early.
			 */
			addr |= DMA_SRC_LAST_TRANSFER;
			priv->cur_sg = NULL;
		} else {
			priv->cur_sg = sg_next(priv->cur_sg);
			priv->dma_elm++;
		}

		/* Source-only transfer: destination is the invalid address
		 * with zero length, the data is consumed by the PCAP.
		 * Source length is in 32-bit words (len is bytes).
		 */
		zynq_fpga_write(priv, DMA_SRC_ADDR_OFFSET, addr);
		zynq_fpga_write(priv, DMA_DST_ADDR_OFFSET, DMA_INVALID_ADDRESS);
		zynq_fpga_write(priv, DMA_SRC_LEN_OFFSET, len / 4);
		zynq_fpga_write(priv, DMA_DEST_LEN_OFFSET, 0);
	}

	/* Once the first transfer is queued we can turn on the ISR, future
	 * calls to zynq_step_dma will happen from the ISR context. The
	 * dma_lock spinlock guarentees this handover is done coherently, the
	 * ISR enable is put at the end to avoid another CPU spinning in the
	 * ISR on this lock.
	 */
	if (first && priv->cur_sg) {
		zynq_fpga_set_irq(priv,
				  IXR_DMA_DONE_MASK | IXR_ERROR_FLAGS_MASK);
	} else if (!priv->cur_sg) {
		/* The last transfer changes to DMA & PCAP mode since we do
		 * not want to continue until everything has been flushed into
		 * the PCAP.
		 */
		zynq_fpga_set_irq(priv,
				  IXR_D_P_DONE_MASK | IXR_ERROR_FLAGS_MASK);
	}
}
211*4882a593Smuzhiyun
/* Interrupt handler for the devcfg DMA engine.
 *
 * A clean DMA-done interrupt with more scatterlist left simply acks the bit
 * and queues the next chunk(s).  Any error flag, or the final (DMA & PCAP
 * done) interrupt, masks all interrupts and wakes the waiter in
 * zynq_fpga_ops_write, which inspects INT_STS to tell the two cases apart.
 */
static irqreturn_t zynq_fpga_isr(int irq, void *data)
{
	struct zynq_fpga_priv *priv = data;
	u32 intr_status;

	/* If anything other than DMA completion is reported stop and hand
	 * control back to zynq_fpga_ops_write, something went wrong,
	 * otherwise progress the DMA.
	 */
	spin_lock(&priv->dma_lock);
	intr_status = zynq_fpga_read(priv, INT_STS_OFFSET);
	if (!(intr_status & IXR_ERROR_FLAGS_MASK) &&
	    (intr_status & IXR_DMA_DONE_MASK) && priv->cur_sg) {
		/* ack only the DMA-done bit, then feed the queue again */
		zynq_fpga_write(priv, INT_STS_OFFSET, IXR_DMA_DONE_MASK);
		zynq_step_dma(priv);
		spin_unlock(&priv->dma_lock);
		return IRQ_HANDLED;
	}
	spin_unlock(&priv->dma_lock);

	/* error or final completion: mask everything and wake the waiter;
	 * the status bits are left set for zynq_fpga_ops_write to read.
	 */
	zynq_fpga_set_irq(priv, 0);
	complete(&priv->dma_done);

	return IRQ_HANDLED;
}
237*4882a593Smuzhiyun
238*4882a593Smuzhiyun /* Sanity check the proposed bitstream. It must start with the sync word in
239*4882a593Smuzhiyun * the correct byte order, and be dword aligned. The input is a Xilinx .bin
240*4882a593Smuzhiyun * file with every 32 bit quantity swapped.
241*4882a593Smuzhiyun */
zynq_fpga_has_sync(const u8 * buf,size_t count)242*4882a593Smuzhiyun static bool zynq_fpga_has_sync(const u8 *buf, size_t count)
243*4882a593Smuzhiyun {
244*4882a593Smuzhiyun for (; count >= 4; buf += 4, count -= 4)
245*4882a593Smuzhiyun if (buf[0] == 0x66 && buf[1] == 0x55 && buf[2] == 0x99 &&
246*4882a593Smuzhiyun buf[3] == 0xaa)
247*4882a593Smuzhiyun return true;
248*4882a593Smuzhiyun return false;
249*4882a593Smuzhiyun }
250*4882a593Smuzhiyun
/* fpga_manager_ops.write_init: prepare the devcfg and (for full reconfig)
 * the PL itself for programming.
 *
 * For a full reconfiguration this validates the image header, asserts the
 * PL AXI resets, gates the level shifters (re-enabling PS->PL only) and
 * pulses PCFG_PROG_B per the UG585 sequence.  For partial reconfiguration
 * only the PCAP control bits are set up.
 *
 * @info:  flags (FPGA_MGR_PARTIAL_RECONFIG, FPGA_MGR_ENCRYPTED_BITSTREAM)
 * @buf:   leading bytes of the image (initial_header_size worth), used only
 *         for the sync-word scan
 * @count: number of bytes in @buf
 * Returns 0 on success or a negative errno.
 */
static int zynq_fpga_ops_write_init(struct fpga_manager *mgr,
				    struct fpga_image_info *info,
				    const char *buf, size_t count)
{
	struct zynq_fpga_priv *priv;
	u32 ctrl, status;
	int err;

	priv = mgr->priv;

	err = clk_enable(priv->clk);
	if (err)
		return err;

	/* check if bitstream is encrypted & and system's still secure */
	if (info->flags & FPGA_MGR_ENCRYPTED_BITSTREAM) {
		ctrl = zynq_fpga_read(priv, CTRL_OFFSET);
		if (!(ctrl & CTRL_SEC_EN_MASK)) {
			dev_err(&mgr->dev,
				"System not secure, can't use crypted bitstreams\n");
			err = -EINVAL;
			goto out_err;
		}
	}

	/* don't globally reset PL if we're doing partial reconfig */
	if (!(info->flags & FPGA_MGR_PARTIAL_RECONFIG)) {
		if (!zynq_fpga_has_sync(buf, count)) {
			dev_err(&mgr->dev,
				"Invalid bitstream, could not find a sync word. Bitstream must be a byte swapped .bin file\n");
			err = -EINVAL;
			goto out_err;
		}

		/* assert AXI interface resets */
		regmap_write(priv->slcr, SLCR_FPGA_RST_CTRL_OFFSET,
			     FPGA_RST_ALL_MASK);

		/* disable all level shifters */
		regmap_write(priv->slcr, SLCR_LVL_SHFTR_EN_OFFSET,
			     LVL_SHFTR_DISABLE_ALL_MASK);
		/* enable level shifters from PS to PL */
		regmap_write(priv->slcr, SLCR_LVL_SHFTR_EN_OFFSET,
			     LVL_SHFTR_ENABLE_PS_TO_PL);

		/* create a rising edge on PCFG_INIT. PCFG_INIT follows
		 * PCFG_PROG_B, so we need to poll it after setting PCFG_PROG_B
		 * to make sure the rising edge actually happens.
		 * Note: PCFG_PROG_B is low active, sequence as described in
		 * UG585 v1.10 page 211
		 */
		ctrl = zynq_fpga_read(priv, CTRL_OFFSET);
		ctrl |= CTRL_PCFG_PROG_B_MASK;

		zynq_fpga_write(priv, CTRL_OFFSET, ctrl);

		err = zynq_fpga_poll_timeout(priv, STATUS_OFFSET, status,
					     status & STATUS_PCFG_INIT_MASK,
					     INIT_POLL_DELAY,
					     INIT_POLL_TIMEOUT);
		if (err) {
			dev_err(&mgr->dev, "Timeout waiting for PCFG_INIT\n");
			goto out_err;
		}

		/* drive PROG_B low and wait for PCFG_INIT to follow... */
		ctrl = zynq_fpga_read(priv, CTRL_OFFSET);
		ctrl &= ~CTRL_PCFG_PROG_B_MASK;

		zynq_fpga_write(priv, CTRL_OFFSET, ctrl);

		err = zynq_fpga_poll_timeout(priv, STATUS_OFFSET, status,
					     !(status & STATUS_PCFG_INIT_MASK),
					     INIT_POLL_DELAY,
					     INIT_POLL_TIMEOUT);
		if (err) {
			dev_err(&mgr->dev, "Timeout waiting for !PCFG_INIT\n");
			goto out_err;
		}

		/* ...then back high again, completing the PROG_B pulse */
		ctrl = zynq_fpga_read(priv, CTRL_OFFSET);
		ctrl |= CTRL_PCFG_PROG_B_MASK;

		zynq_fpga_write(priv, CTRL_OFFSET, ctrl);

		err = zynq_fpga_poll_timeout(priv, STATUS_OFFSET, status,
					     status & STATUS_PCFG_INIT_MASK,
					     INIT_POLL_DELAY,
					     INIT_POLL_TIMEOUT);
		if (err) {
			dev_err(&mgr->dev, "Timeout waiting for PCFG_INIT\n");
			goto out_err;
		}
	}

	/* set configuration register with following options:
	 * - enable PCAP interface
	 * - set throughput for maximum speed (if bistream not crypted)
	 * - set CPU in user mode
	 */
	ctrl = zynq_fpga_read(priv, CTRL_OFFSET);
	if (info->flags & FPGA_MGR_ENCRYPTED_BITSTREAM)
		zynq_fpga_write(priv, CTRL_OFFSET,
				(CTRL_PCAP_PR_MASK | CTRL_PCAP_MODE_MASK
				 | CTRL_PCAP_RATE_EN_MASK | ctrl));
	else
		zynq_fpga_write(priv, CTRL_OFFSET,
				(CTRL_PCAP_PR_MASK | CTRL_PCAP_MODE_MASK
				 | ctrl));

	/* We expect that the command queue is empty right now. */
	status = zynq_fpga_read(priv, STATUS_OFFSET);
	if ((status & STATUS_DMA_Q_F) ||
	    (status & STATUS_DMA_Q_E) != STATUS_DMA_Q_E) {
		dev_err(&mgr->dev, "DMA command queue not right\n");
		err = -EBUSY;
		goto out_err;
	}

	/* ensure internal PCAP loopback is disabled */
	ctrl = zynq_fpga_read(priv, MCTRL_OFFSET);
	zynq_fpga_write(priv, MCTRL_OFFSET, (~MCTRL_PCAP_LPBK_MASK & ctrl));

	clk_disable(priv->clk);

	return 0;

out_err:
	clk_disable(priv->clk);

	return err;
}
383*4882a593Smuzhiyun
zynq_fpga_ops_write(struct fpga_manager * mgr,struct sg_table * sgt)384*4882a593Smuzhiyun static int zynq_fpga_ops_write(struct fpga_manager *mgr, struct sg_table *sgt)
385*4882a593Smuzhiyun {
386*4882a593Smuzhiyun struct zynq_fpga_priv *priv;
387*4882a593Smuzhiyun const char *why;
388*4882a593Smuzhiyun int err;
389*4882a593Smuzhiyun u32 intr_status;
390*4882a593Smuzhiyun unsigned long timeout;
391*4882a593Smuzhiyun unsigned long flags;
392*4882a593Smuzhiyun struct scatterlist *sg;
393*4882a593Smuzhiyun int i;
394*4882a593Smuzhiyun
395*4882a593Smuzhiyun priv = mgr->priv;
396*4882a593Smuzhiyun
397*4882a593Smuzhiyun /* The hardware can only DMA multiples of 4 bytes, and it requires the
398*4882a593Smuzhiyun * starting addresses to be aligned to 64 bits (UG585 pg 212).
399*4882a593Smuzhiyun */
400*4882a593Smuzhiyun for_each_sg(sgt->sgl, sg, sgt->nents, i) {
401*4882a593Smuzhiyun if ((sg->offset % 8) || (sg->length % 4)) {
402*4882a593Smuzhiyun dev_err(&mgr->dev,
403*4882a593Smuzhiyun "Invalid bitstream, chunks must be aligned\n");
404*4882a593Smuzhiyun return -EINVAL;
405*4882a593Smuzhiyun }
406*4882a593Smuzhiyun }
407*4882a593Smuzhiyun
408*4882a593Smuzhiyun priv->dma_nelms =
409*4882a593Smuzhiyun dma_map_sg(mgr->dev.parent, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
410*4882a593Smuzhiyun if (priv->dma_nelms == 0) {
411*4882a593Smuzhiyun dev_err(&mgr->dev, "Unable to DMA map (TO_DEVICE)\n");
412*4882a593Smuzhiyun return -ENOMEM;
413*4882a593Smuzhiyun }
414*4882a593Smuzhiyun
415*4882a593Smuzhiyun /* enable clock */
416*4882a593Smuzhiyun err = clk_enable(priv->clk);
417*4882a593Smuzhiyun if (err)
418*4882a593Smuzhiyun goto out_free;
419*4882a593Smuzhiyun
420*4882a593Smuzhiyun zynq_fpga_write(priv, INT_STS_OFFSET, IXR_ALL_MASK);
421*4882a593Smuzhiyun reinit_completion(&priv->dma_done);
422*4882a593Smuzhiyun
423*4882a593Smuzhiyun /* zynq_step_dma will turn on interrupts */
424*4882a593Smuzhiyun spin_lock_irqsave(&priv->dma_lock, flags);
425*4882a593Smuzhiyun priv->dma_elm = 0;
426*4882a593Smuzhiyun priv->cur_sg = sgt->sgl;
427*4882a593Smuzhiyun zynq_step_dma(priv);
428*4882a593Smuzhiyun spin_unlock_irqrestore(&priv->dma_lock, flags);
429*4882a593Smuzhiyun
430*4882a593Smuzhiyun timeout = wait_for_completion_timeout(&priv->dma_done,
431*4882a593Smuzhiyun msecs_to_jiffies(DMA_TIMEOUT_MS));
432*4882a593Smuzhiyun
433*4882a593Smuzhiyun spin_lock_irqsave(&priv->dma_lock, flags);
434*4882a593Smuzhiyun zynq_fpga_set_irq(priv, 0);
435*4882a593Smuzhiyun priv->cur_sg = NULL;
436*4882a593Smuzhiyun spin_unlock_irqrestore(&priv->dma_lock, flags);
437*4882a593Smuzhiyun
438*4882a593Smuzhiyun intr_status = zynq_fpga_read(priv, INT_STS_OFFSET);
439*4882a593Smuzhiyun zynq_fpga_write(priv, INT_STS_OFFSET, IXR_ALL_MASK);
440*4882a593Smuzhiyun
441*4882a593Smuzhiyun /* There doesn't seem to be a way to force cancel any DMA, so if
442*4882a593Smuzhiyun * something went wrong we are relying on the hardware to have halted
443*4882a593Smuzhiyun * the DMA before we get here, if there was we could use
444*4882a593Smuzhiyun * wait_for_completion_interruptible too.
445*4882a593Smuzhiyun */
446*4882a593Smuzhiyun
447*4882a593Smuzhiyun if (intr_status & IXR_ERROR_FLAGS_MASK) {
448*4882a593Smuzhiyun why = "DMA reported error";
449*4882a593Smuzhiyun err = -EIO;
450*4882a593Smuzhiyun goto out_report;
451*4882a593Smuzhiyun }
452*4882a593Smuzhiyun
453*4882a593Smuzhiyun if (priv->cur_sg ||
454*4882a593Smuzhiyun !((intr_status & IXR_D_P_DONE_MASK) == IXR_D_P_DONE_MASK)) {
455*4882a593Smuzhiyun if (timeout == 0)
456*4882a593Smuzhiyun why = "DMA timed out";
457*4882a593Smuzhiyun else
458*4882a593Smuzhiyun why = "DMA did not complete";
459*4882a593Smuzhiyun err = -EIO;
460*4882a593Smuzhiyun goto out_report;
461*4882a593Smuzhiyun }
462*4882a593Smuzhiyun
463*4882a593Smuzhiyun err = 0;
464*4882a593Smuzhiyun goto out_clk;
465*4882a593Smuzhiyun
466*4882a593Smuzhiyun out_report:
467*4882a593Smuzhiyun dev_err(&mgr->dev,
468*4882a593Smuzhiyun "%s: INT_STS:0x%x CTRL:0x%x LOCK:0x%x INT_MASK:0x%x STATUS:0x%x MCTRL:0x%x\n",
469*4882a593Smuzhiyun why,
470*4882a593Smuzhiyun intr_status,
471*4882a593Smuzhiyun zynq_fpga_read(priv, CTRL_OFFSET),
472*4882a593Smuzhiyun zynq_fpga_read(priv, LOCK_OFFSET),
473*4882a593Smuzhiyun zynq_fpga_read(priv, INT_MASK_OFFSET),
474*4882a593Smuzhiyun zynq_fpga_read(priv, STATUS_OFFSET),
475*4882a593Smuzhiyun zynq_fpga_read(priv, MCTRL_OFFSET));
476*4882a593Smuzhiyun
477*4882a593Smuzhiyun out_clk:
478*4882a593Smuzhiyun clk_disable(priv->clk);
479*4882a593Smuzhiyun
480*4882a593Smuzhiyun out_free:
481*4882a593Smuzhiyun dma_unmap_sg(mgr->dev.parent, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
482*4882a593Smuzhiyun return err;
483*4882a593Smuzhiyun }
484*4882a593Smuzhiyun
/* fpga_manager_ops.write_complete: finish programming.
 *
 * Hands 'PR' control back to the ICAP, waits for PCFG_DONE and, for a full
 * reconfiguration only, re-enables the PL->PS level shifters and releases
 * the AXI resets asserted in write_init.
 * Returns 0 on success or a negative errno (timeout waiting for PCFG_DONE).
 */
static int zynq_fpga_ops_write_complete(struct fpga_manager *mgr,
					struct fpga_image_info *info)
{
	struct zynq_fpga_priv *priv = mgr->priv;
	int err;
	u32 intr_status;

	err = clk_enable(priv->clk);
	if (err)
		return err;

	/* Release 'PR' control back to the ICAP */
	zynq_fpga_write(priv, CTRL_OFFSET,
		zynq_fpga_read(priv, CTRL_OFFSET) & ~CTRL_PCAP_PR_MASK);

	err = zynq_fpga_poll_timeout(priv, INT_STS_OFFSET, intr_status,
				     intr_status & IXR_PCFG_DONE_MASK,
				     INIT_POLL_DELAY,
				     INIT_POLL_TIMEOUT);

	/* drop the clock before the error check so it is balanced on all
	 * return paths
	 */
	clk_disable(priv->clk);

	if (err)
		return err;

	/* for the partial reconfig case we didn't touch the level shifters */
	if (!(info->flags & FPGA_MGR_PARTIAL_RECONFIG)) {
		/* enable level shifters from PL to PS */
		regmap_write(priv->slcr, SLCR_LVL_SHFTR_EN_OFFSET,
			     LVL_SHFTR_ENABLE_PL_TO_PS);

		/* deassert AXI interface resets */
		regmap_write(priv->slcr, SLCR_FPGA_RST_CTRL_OFFSET,
			     FPGA_RST_NONE_MASK);
	}

	return 0;
}
523*4882a593Smuzhiyun
zynq_fpga_ops_state(struct fpga_manager * mgr)524*4882a593Smuzhiyun static enum fpga_mgr_states zynq_fpga_ops_state(struct fpga_manager *mgr)
525*4882a593Smuzhiyun {
526*4882a593Smuzhiyun int err;
527*4882a593Smuzhiyun u32 intr_status;
528*4882a593Smuzhiyun struct zynq_fpga_priv *priv;
529*4882a593Smuzhiyun
530*4882a593Smuzhiyun priv = mgr->priv;
531*4882a593Smuzhiyun
532*4882a593Smuzhiyun err = clk_enable(priv->clk);
533*4882a593Smuzhiyun if (err)
534*4882a593Smuzhiyun return FPGA_MGR_STATE_UNKNOWN;
535*4882a593Smuzhiyun
536*4882a593Smuzhiyun intr_status = zynq_fpga_read(priv, INT_STS_OFFSET);
537*4882a593Smuzhiyun clk_disable(priv->clk);
538*4882a593Smuzhiyun
539*4882a593Smuzhiyun if (intr_status & IXR_PCFG_DONE_MASK)
540*4882a593Smuzhiyun return FPGA_MGR_STATE_OPERATING;
541*4882a593Smuzhiyun
542*4882a593Smuzhiyun return FPGA_MGR_STATE_UNKNOWN;
543*4882a593Smuzhiyun }
544*4882a593Smuzhiyun
/* FPGA manager callbacks.  initial_header_size requests that the core pass
 * the leading 128 bytes of the image to write_init (enough for the
 * sync-word scan there); streaming uses write_sg, not write.
 */
static const struct fpga_manager_ops zynq_fpga_ops = {
	.initial_header_size = 128,
	.state = zynq_fpga_ops_state,
	.write_init = zynq_fpga_ops_write_init,
	.write_sg = zynq_fpga_ops_write,
	.write_complete = zynq_fpga_ops_write_complete,
};
552*4882a593Smuzhiyun
zynq_fpga_probe(struct platform_device * pdev)553*4882a593Smuzhiyun static int zynq_fpga_probe(struct platform_device *pdev)
554*4882a593Smuzhiyun {
555*4882a593Smuzhiyun struct device *dev = &pdev->dev;
556*4882a593Smuzhiyun struct zynq_fpga_priv *priv;
557*4882a593Smuzhiyun struct fpga_manager *mgr;
558*4882a593Smuzhiyun struct resource *res;
559*4882a593Smuzhiyun int err;
560*4882a593Smuzhiyun
561*4882a593Smuzhiyun priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
562*4882a593Smuzhiyun if (!priv)
563*4882a593Smuzhiyun return -ENOMEM;
564*4882a593Smuzhiyun spin_lock_init(&priv->dma_lock);
565*4882a593Smuzhiyun
566*4882a593Smuzhiyun res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
567*4882a593Smuzhiyun priv->io_base = devm_ioremap_resource(dev, res);
568*4882a593Smuzhiyun if (IS_ERR(priv->io_base))
569*4882a593Smuzhiyun return PTR_ERR(priv->io_base);
570*4882a593Smuzhiyun
571*4882a593Smuzhiyun priv->slcr = syscon_regmap_lookup_by_phandle(dev->of_node,
572*4882a593Smuzhiyun "syscon");
573*4882a593Smuzhiyun if (IS_ERR(priv->slcr)) {
574*4882a593Smuzhiyun dev_err(dev, "unable to get zynq-slcr regmap\n");
575*4882a593Smuzhiyun return PTR_ERR(priv->slcr);
576*4882a593Smuzhiyun }
577*4882a593Smuzhiyun
578*4882a593Smuzhiyun init_completion(&priv->dma_done);
579*4882a593Smuzhiyun
580*4882a593Smuzhiyun priv->irq = platform_get_irq(pdev, 0);
581*4882a593Smuzhiyun if (priv->irq < 0)
582*4882a593Smuzhiyun return priv->irq;
583*4882a593Smuzhiyun
584*4882a593Smuzhiyun priv->clk = devm_clk_get(dev, "ref_clk");
585*4882a593Smuzhiyun if (IS_ERR(priv->clk)) {
586*4882a593Smuzhiyun if (PTR_ERR(priv->clk) != -EPROBE_DEFER)
587*4882a593Smuzhiyun dev_err(dev, "input clock not found\n");
588*4882a593Smuzhiyun return PTR_ERR(priv->clk);
589*4882a593Smuzhiyun }
590*4882a593Smuzhiyun
591*4882a593Smuzhiyun err = clk_prepare_enable(priv->clk);
592*4882a593Smuzhiyun if (err) {
593*4882a593Smuzhiyun dev_err(dev, "unable to enable clock\n");
594*4882a593Smuzhiyun return err;
595*4882a593Smuzhiyun }
596*4882a593Smuzhiyun
597*4882a593Smuzhiyun /* unlock the device */
598*4882a593Smuzhiyun zynq_fpga_write(priv, UNLOCK_OFFSET, UNLOCK_MASK);
599*4882a593Smuzhiyun
600*4882a593Smuzhiyun zynq_fpga_set_irq(priv, 0);
601*4882a593Smuzhiyun zynq_fpga_write(priv, INT_STS_OFFSET, IXR_ALL_MASK);
602*4882a593Smuzhiyun err = devm_request_irq(dev, priv->irq, zynq_fpga_isr, 0, dev_name(dev),
603*4882a593Smuzhiyun priv);
604*4882a593Smuzhiyun if (err) {
605*4882a593Smuzhiyun dev_err(dev, "unable to request IRQ\n");
606*4882a593Smuzhiyun clk_disable_unprepare(priv->clk);
607*4882a593Smuzhiyun return err;
608*4882a593Smuzhiyun }
609*4882a593Smuzhiyun
610*4882a593Smuzhiyun clk_disable(priv->clk);
611*4882a593Smuzhiyun
612*4882a593Smuzhiyun mgr = devm_fpga_mgr_create(dev, "Xilinx Zynq FPGA Manager",
613*4882a593Smuzhiyun &zynq_fpga_ops, priv);
614*4882a593Smuzhiyun if (!mgr)
615*4882a593Smuzhiyun return -ENOMEM;
616*4882a593Smuzhiyun
617*4882a593Smuzhiyun platform_set_drvdata(pdev, mgr);
618*4882a593Smuzhiyun
619*4882a593Smuzhiyun err = fpga_mgr_register(mgr);
620*4882a593Smuzhiyun if (err) {
621*4882a593Smuzhiyun dev_err(dev, "unable to register FPGA manager\n");
622*4882a593Smuzhiyun clk_unprepare(priv->clk);
623*4882a593Smuzhiyun return err;
624*4882a593Smuzhiyun }
625*4882a593Smuzhiyun
626*4882a593Smuzhiyun return 0;
627*4882a593Smuzhiyun }
628*4882a593Smuzhiyun
zynq_fpga_remove(struct platform_device * pdev)629*4882a593Smuzhiyun static int zynq_fpga_remove(struct platform_device *pdev)
630*4882a593Smuzhiyun {
631*4882a593Smuzhiyun struct zynq_fpga_priv *priv;
632*4882a593Smuzhiyun struct fpga_manager *mgr;
633*4882a593Smuzhiyun
634*4882a593Smuzhiyun mgr = platform_get_drvdata(pdev);
635*4882a593Smuzhiyun priv = mgr->priv;
636*4882a593Smuzhiyun
637*4882a593Smuzhiyun fpga_mgr_unregister(mgr);
638*4882a593Smuzhiyun
639*4882a593Smuzhiyun clk_unprepare(priv->clk);
640*4882a593Smuzhiyun
641*4882a593Smuzhiyun return 0;
642*4882a593Smuzhiyun }
643*4882a593Smuzhiyun
#ifdef CONFIG_OF
/* Device tree match table: Zynq-7000 devcfg block */
static const struct of_device_id zynq_fpga_of_match[] = {
	{ .compatible = "xlnx,zynq-devcfg-1.0", },
	{},
};

MODULE_DEVICE_TABLE(of, zynq_fpga_of_match);
#endif
652*4882a593Smuzhiyun
/* Platform driver glue; of_match_ptr() compiles the table away on !CONFIG_OF */
static struct platform_driver zynq_fpga_driver = {
	.probe = zynq_fpga_probe,
	.remove = zynq_fpga_remove,
	.driver = {
		.name = "zynq_fpga_manager",
		.of_match_table = of_match_ptr(zynq_fpga_of_match),
	},
};

module_platform_driver(zynq_fpga_driver);
663*4882a593Smuzhiyun
664*4882a593Smuzhiyun MODULE_AUTHOR("Moritz Fischer <moritz.fischer@ettus.com>");
665*4882a593Smuzhiyun MODULE_AUTHOR("Michal Simek <michal.simek@xilinx.com>");
666*4882a593Smuzhiyun MODULE_DESCRIPTION("Xilinx Zynq FPGA Manager");
667*4882a593Smuzhiyun MODULE_LICENSE("GPL v2");
668