xref: /OK3568_Linux_fs/kernel/drivers/crypto/mediatek/mtk-platform.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Driver for EIP97 cryptographic accelerator.
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Copyright (c) 2016 Ryder Lee <ryder.lee@mediatek.com>
6*4882a593Smuzhiyun  */
7*4882a593Smuzhiyun 
8*4882a593Smuzhiyun #include <linux/clk.h>
9*4882a593Smuzhiyun #include <linux/init.h>
10*4882a593Smuzhiyun #include <linux/kernel.h>
11*4882a593Smuzhiyun #include <linux/module.h>
12*4882a593Smuzhiyun #include <linux/mod_devicetable.h>
13*4882a593Smuzhiyun #include <linux/platform_device.h>
14*4882a593Smuzhiyun #include <linux/pm_runtime.h>
15*4882a593Smuzhiyun #include "mtk-platform.h"
16*4882a593Smuzhiyun 
/* HIA_MST_CTRL: master (DMA) interface burst size field */
#define MTK_BURST_SIZE_MSK		GENMASK(7, 4)
#define MTK_BURST_SIZE(x)		((x) << 4)
/* CDR/RDR_DESC_SIZE and CDR/RDR_CFG: descriptor size/offset/fetch fields */
#define MTK_DESC_SIZE(x)		((x) << 0)
#define MTK_DESC_OFFSET(x)		((x) << 16)
#define MTK_DESC_FETCH_SIZE(x)		((x) << 0)
#define MTK_DESC_FETCH_THRESH(x)	((x) << 16)
#define MTK_DESC_OVL_IRQ_EN		BIT(25)
#define MTK_DESC_ATP_PRESENT		BIT(30)

/* DFE/DSE thread control/status fields */
#define MTK_DFSE_IDLE			GENMASK(3, 0)
#define MTK_DFSE_THR_CTRL_EN		BIT(30)
#define MTK_DFSE_THR_CTRL_RESET		BIT(31)
#define MTK_DFSE_RING_ID(x)		(((x) >> 12) & GENMASK(3, 0))
#define MTK_DFSE_MIN_DATA(x)		((x) << 0)
#define MTK_DFSE_MAX_DATA(x)		((x) << 8)
#define MTK_DFE_MIN_CTRL(x)		((x) << 16)
#define MTK_DFE_MAX_CTRL(x)		((x) << 24)

/* Packet engine buffer thresholds and capability-register size extractors */
#define MTK_IN_BUF_MIN_THRESH(x)	((x) << 8)
#define MTK_IN_BUF_MAX_THRESH(x)	((x) << 12)
#define MTK_OUT_BUF_MIN_THRESH(x)	((x) << 0)
#define MTK_OUT_BUF_MAX_THRESH(x)	((x) << 4)
#define MTK_IN_TBUF_SIZE(x)		(((x) >> 4) & GENMASK(3, 0))
#define MTK_IN_DBUF_SIZE(x)		(((x) >> 8) & GENMASK(3, 0))
#define MTK_OUT_DBUF_SIZE(x)		(((x) >> 16) & GENMASK(3, 0))
#define MTK_CMD_FIFO_SIZE(x)		(((x) >> 8) & GENMASK(3, 0))
#define MTK_RES_FIFO_SIZE(x)		(((x) >> 12) & GENMASK(3, 0))

/*
 * PE_TOKEN_CTRL_STAT / PE_INTERRUPT_CTRL_STAT bits.
 * NOTE(review): "PORC" looks like a typo for "PROC"; names kept as-is
 * since they are referenced throughout this file.
 */
#define MTK_PE_TK_LOC_AVL		BIT(2)
#define MTK_PE_PROC_HELD		BIT(14)
#define MTK_PE_TK_TIMEOUT_EN		BIT(22)
#define MTK_PE_INPUT_DMA_ERR		BIT(0)
#define MTK_PE_OUTPUT_DMA_ERR		BIT(1)
#define MTK_PE_PKT_PORC_ERR		BIT(2)
#define MTK_PE_PKT_TIMEOUT		BIT(3)
#define MTK_PE_FATAL_ERR		BIT(14)
#define MTK_PE_INPUT_DMA_ERR_EN		BIT(16)
#define MTK_PE_OUTPUT_DMA_ERR_EN	BIT(17)
#define MTK_PE_PKT_PORC_ERR_EN		BIT(18)
#define MTK_PE_PKT_TIMEOUT_EN		BIT(19)
#define MTK_PE_FATAL_ERR_EN		BIT(30)
#define MTK_PE_INT_OUT_EN		BIT(31)

/* HIA signature/capability extractors and ring status-clear masks */
#define MTK_HIA_SIGNATURE		((u16)0x35ca)
#define MTK_HIA_DATA_WIDTH(x)		(((x) >> 25) & GENMASK(1, 0))
#define MTK_HIA_DMA_LENGTH(x)		(((x) >> 20) & GENMASK(4, 0))
#define MTK_CDR_STAT_CLR		GENMASK(4, 0)
#define MTK_RDR_STAT_CLR		GENMASK(7, 0)

/* Advanced interrupt controller (AIC) version/options masks */
#define MTK_AIC_INT_MSK			GENMASK(5, 0)
#define MTK_AIC_VER_MSK			(GENMASK(15, 0) | GENMASK(27, 20))
#define MTK_AIC_VER11			0x011036c9
#define MTK_AIC_VER12			0x012036c9
#define MTK_AIC_G_CLR			GENMASK(30, 20)
71*4882a593Smuzhiyun 
/*
73*4882a593Smuzhiyun  * EIP97 is an integrated security subsystem to accelerate cryptographic
74*4882a593Smuzhiyun  * functions and protocols to offload the host processor.
75*4882a593Smuzhiyun  * Some important hardware modules are briefly introduced below:
76*4882a593Smuzhiyun  *
77*4882a593Smuzhiyun  * Host Interface Adapter(HIA) - the main interface between the host
78*4882a593Smuzhiyun  * system and the hardware subsystem. It is responsible for attaching
79*4882a593Smuzhiyun  * processing engine to the specific host bus interface and provides a
80*4882a593Smuzhiyun  * standardized software view for off loading tasks to the engine.
81*4882a593Smuzhiyun  *
82*4882a593Smuzhiyun  * Command Descriptor Ring Manager(CDR Manager) - keeps track of how many
83*4882a593Smuzhiyun  * CD the host has prepared in the CDR. It monitors the fill level of its
84*4882a593Smuzhiyun  * CD-FIFO and if there's sufficient space for the next block of descriptors,
85*4882a593Smuzhiyun  * then it fires off a DMA request to fetch a block of CDs.
86*4882a593Smuzhiyun  *
87*4882a593Smuzhiyun  * Data fetch engine(DFE) - It is responsible for parsing the CD and
88*4882a593Smuzhiyun  * setting up the required control and packet data DMA transfers from
89*4882a593Smuzhiyun  * system memory to the processing engine.
90*4882a593Smuzhiyun  *
91*4882a593Smuzhiyun  * Result Descriptor Ring Manager(RDR Manager) - same as CDR Manager,
92*4882a593Smuzhiyun  * but target is result descriptors, Moreover, it also handles the RD
93*4882a593Smuzhiyun  * updates under control of the DSE. For each packet data segment
94*4882a593Smuzhiyun  * processed, the DSE triggers the RDR Manager to write the updated RD.
95*4882a593Smuzhiyun  * If triggered to update, the RDR Manager sets up a DMA operation to
96*4882a593Smuzhiyun  * copy the RD from the DSE to the correct location in the RDR.
97*4882a593Smuzhiyun  *
98*4882a593Smuzhiyun  * Data Store Engine(DSE) - It is responsible for parsing the prepared RD
99*4882a593Smuzhiyun  * and setting up the required control and packet data DMA transfers from
100*4882a593Smuzhiyun  * the processing engine to system memory.
101*4882a593Smuzhiyun  *
102*4882a593Smuzhiyun  * Advanced Interrupt Controllers(AICs) - receive interrupt request signals
103*4882a593Smuzhiyun  * from various sources and combine them into one interrupt output.
104*4882a593Smuzhiyun  * The AICs are used by:
105*4882a593Smuzhiyun  * - One for the HIA global and processing engine interrupts.
106*4882a593Smuzhiyun  * - The others for the descriptor ring interrupts.
107*4882a593Smuzhiyun  */
108*4882a593Smuzhiyun 
/*
 * Cryptographic engine capabilities, captured from hardware registers
 * at setup time (see mtk_packet_engine_setup()).
 */
struct mtk_sys_cap {
	/* host interface adapter: HIA_VERSION and HIA_OPTIONS registers */
	u32 hia_ver;
	u32 hia_opt;
	/* packet engine options (not referenced in this file) */
	u32 pkt_eng_opt;
	/* global hardware options: EIP97_OPTIONS register */
	u32 hw_opt;
};
119*4882a593Smuzhiyun 
/*
 * Attach the descriptor rings selected by @mask to the DFE and DSE
 * threads and enable thread control on both engines.
 */
static void mtk_desc_ring_link(struct mtk_cryp *cryp, u32 mask)
{
	/* Assign rings to DFE/DSE thread and enable it */
	writel(MTK_DFSE_THR_CTRL_EN | mask, cryp->base + DFE_THR_CTRL);
	writel(MTK_DFSE_THR_CTRL_EN | mask, cryp->base + DSE_THR_CTRL);
}
126*4882a593Smuzhiyun 
mtk_dfe_dse_buf_setup(struct mtk_cryp * cryp,struct mtk_sys_cap * cap)127*4882a593Smuzhiyun static void mtk_dfe_dse_buf_setup(struct mtk_cryp *cryp,
128*4882a593Smuzhiyun 				  struct mtk_sys_cap *cap)
129*4882a593Smuzhiyun {
130*4882a593Smuzhiyun 	u32 width = MTK_HIA_DATA_WIDTH(cap->hia_opt) + 2;
131*4882a593Smuzhiyun 	u32 len = MTK_HIA_DMA_LENGTH(cap->hia_opt) - 1;
132*4882a593Smuzhiyun 	u32 ipbuf = min((u32)MTK_IN_DBUF_SIZE(cap->hw_opt) + width, len);
133*4882a593Smuzhiyun 	u32 opbuf = min((u32)MTK_OUT_DBUF_SIZE(cap->hw_opt) + width, len);
134*4882a593Smuzhiyun 	u32 itbuf = min((u32)MTK_IN_TBUF_SIZE(cap->hw_opt) + width, len);
135*4882a593Smuzhiyun 
136*4882a593Smuzhiyun 	writel(MTK_DFSE_MIN_DATA(ipbuf - 1) |
137*4882a593Smuzhiyun 	       MTK_DFSE_MAX_DATA(ipbuf) |
138*4882a593Smuzhiyun 	       MTK_DFE_MIN_CTRL(itbuf - 1) |
139*4882a593Smuzhiyun 	       MTK_DFE_MAX_CTRL(itbuf),
140*4882a593Smuzhiyun 	       cryp->base + DFE_CFG);
141*4882a593Smuzhiyun 
142*4882a593Smuzhiyun 	writel(MTK_DFSE_MIN_DATA(opbuf - 1) |
143*4882a593Smuzhiyun 	       MTK_DFSE_MAX_DATA(opbuf),
144*4882a593Smuzhiyun 	       cryp->base + DSE_CFG);
145*4882a593Smuzhiyun 
146*4882a593Smuzhiyun 	writel(MTK_IN_BUF_MIN_THRESH(ipbuf - 1) |
147*4882a593Smuzhiyun 	       MTK_IN_BUF_MAX_THRESH(ipbuf),
148*4882a593Smuzhiyun 	       cryp->base + PE_IN_DBUF_THRESH);
149*4882a593Smuzhiyun 
150*4882a593Smuzhiyun 	writel(MTK_IN_BUF_MIN_THRESH(itbuf - 1) |
151*4882a593Smuzhiyun 	       MTK_IN_BUF_MAX_THRESH(itbuf),
152*4882a593Smuzhiyun 	       cryp->base + PE_IN_TBUF_THRESH);
153*4882a593Smuzhiyun 
154*4882a593Smuzhiyun 	writel(MTK_OUT_BUF_MIN_THRESH(opbuf - 1) |
155*4882a593Smuzhiyun 	       MTK_OUT_BUF_MAX_THRESH(opbuf),
156*4882a593Smuzhiyun 	       cryp->base + PE_OUT_DBUF_THRESH);
157*4882a593Smuzhiyun 
158*4882a593Smuzhiyun 	writel(0, cryp->base + PE_OUT_TBUF_THRESH);
159*4882a593Smuzhiyun 	writel(0, cryp->base + PE_OUT_BUF_CTRL);
160*4882a593Smuzhiyun }
161*4882a593Smuzhiyun 
mtk_dfe_dse_state_check(struct mtk_cryp * cryp)162*4882a593Smuzhiyun static int mtk_dfe_dse_state_check(struct mtk_cryp *cryp)
163*4882a593Smuzhiyun {
164*4882a593Smuzhiyun 	int ret = -EINVAL;
165*4882a593Smuzhiyun 	u32 val;
166*4882a593Smuzhiyun 
167*4882a593Smuzhiyun 	/* Check for completion of all DMA transfers */
168*4882a593Smuzhiyun 	val = readl(cryp->base + DFE_THR_STAT);
169*4882a593Smuzhiyun 	if (MTK_DFSE_RING_ID(val) == MTK_DFSE_IDLE) {
170*4882a593Smuzhiyun 		val = readl(cryp->base + DSE_THR_STAT);
171*4882a593Smuzhiyun 		if (MTK_DFSE_RING_ID(val) == MTK_DFSE_IDLE)
172*4882a593Smuzhiyun 			ret = 0;
173*4882a593Smuzhiyun 	}
174*4882a593Smuzhiyun 
175*4882a593Smuzhiyun 	if (!ret) {
176*4882a593Smuzhiyun 		/* Take DFE/DSE thread out of reset */
177*4882a593Smuzhiyun 		writel(0, cryp->base + DFE_THR_CTRL);
178*4882a593Smuzhiyun 		writel(0, cryp->base + DSE_THR_CTRL);
179*4882a593Smuzhiyun 	} else {
180*4882a593Smuzhiyun 		return -EBUSY;
181*4882a593Smuzhiyun 	}
182*4882a593Smuzhiyun 
183*4882a593Smuzhiyun 	return 0;
184*4882a593Smuzhiyun }
185*4882a593Smuzhiyun 
/*
 * Put the DFE and DSE threads into reset, zero the per-ring priority
 * configuration of both engines, then confirm they have gone idle and
 * release them from reset.
 *
 * Returns 0 on success, -EBUSY if either engine failed to go idle.
 */
static int mtk_dfe_dse_reset(struct mtk_cryp *cryp)
{
	/* Reset DSE/DFE and correct system priorities for all rings. */
	writel(MTK_DFSE_THR_CTRL_RESET, cryp->base + DFE_THR_CTRL);
	writel(0, cryp->base + DFE_PRIO_0);
	writel(0, cryp->base + DFE_PRIO_1);
	writel(0, cryp->base + DFE_PRIO_2);
	writel(0, cryp->base + DFE_PRIO_3);

	writel(MTK_DFSE_THR_CTRL_RESET, cryp->base + DSE_THR_CTRL);
	writel(0, cryp->base + DSE_PRIO_0);
	writel(0, cryp->base + DSE_PRIO_1);
	writel(0, cryp->base + DSE_PRIO_2);
	writel(0, cryp->base + DSE_PRIO_3);

	return mtk_dfe_dse_state_check(cryp);
}
203*4882a593Smuzhiyun 
/*
 * Initialize command descriptor ring @i: quiesce the ring, reset its
 * counters and pointers, program the host DMA base address/size, and
 * configure descriptor layout and HIA fetch parameters.
 *
 * The register write order is significant: the ring is disabled first
 * (CDR_CFG = 0) and re-enabled with fetch configuration last.
 */
static void mtk_cmd_desc_ring_setup(struct mtk_cryp *cryp,
				    int i, struct mtk_sys_cap *cap)
{
	/* Full descriptor that fits FIFO minus one */
	u32 count =
		((1 << MTK_CMD_FIFO_SIZE(cap->hia_opt)) / MTK_DESC_SZ) - 1;

	/* Temporarily disable external triggering */
	writel(0, cryp->base + CDR_CFG(i));

	/* Clear CDR count */
	writel(MTK_CNT_RST, cryp->base + CDR_PREP_COUNT(i));
	writel(MTK_CNT_RST, cryp->base + CDR_PROC_COUNT(i));

	/* Reset prepare/process pointers and DMA configuration */
	writel(0, cryp->base + CDR_PREP_PNTR(i));
	writel(0, cryp->base + CDR_PROC_PNTR(i));
	writel(0, cryp->base + CDR_DMA_CFG(i));

	/* Configure CDR host address space (32-bit DMA: high word is 0) */
	writel(0, cryp->base + CDR_BASE_ADDR_HI(i));
	writel(cryp->ring[i]->cmd_dma, cryp->base + CDR_BASE_ADDR_LO(i));

	writel(MTK_DESC_RING_SZ, cryp->base + CDR_RING_SIZE(i));

	/* Clear and disable all CDR interrupts */
	writel(MTK_CDR_STAT_CLR, cryp->base + CDR_STAT(i));

	/*
	 * Set command descriptor offset and enable additional
	 * token present in descriptor.
	 */
	writel(MTK_DESC_SIZE(MTK_DESC_SZ) |
	       MTK_DESC_OFFSET(MTK_DESC_OFF) |
	       MTK_DESC_ATP_PRESENT,
	       cryp->base + CDR_DESC_SIZE(i));

	/* Re-enable the ring with block fetch size and threshold */
	writel(MTK_DESC_FETCH_SIZE(count * MTK_DESC_OFF) |
	       MTK_DESC_FETCH_THRESH(count * MTK_DESC_SZ),
	       cryp->base + CDR_CFG(i));
}
244*4882a593Smuzhiyun 
/*
 * Initialize result descriptor ring @i: quiesce the ring, reset its
 * counters and pointers, program the host DMA base address/size, set
 * the processed-packet interrupt threshold, and configure descriptor
 * layout and HIA fetch parameters.
 *
 * Mirrors mtk_cmd_desc_ring_setup() for the RDR side; write order is
 * significant (ring disabled first, re-enabled last).
 */
static void mtk_res_desc_ring_setup(struct mtk_cryp *cryp,
				    int i, struct mtk_sys_cap *cap)
{
	/* RD FIFO entries are counted in pairs of words */
	u32 rndup = 2;
	u32 count = ((1 << MTK_RES_FIFO_SIZE(cap->hia_opt)) / rndup) - 1;

	/* Temporarily disable external triggering */
	writel(0, cryp->base + RDR_CFG(i));

	/* Clear RDR count */
	writel(MTK_CNT_RST, cryp->base + RDR_PREP_COUNT(i));
	writel(MTK_CNT_RST, cryp->base + RDR_PROC_COUNT(i));

	/* Reset prepare/process pointers and DMA configuration */
	writel(0, cryp->base + RDR_PREP_PNTR(i));
	writel(0, cryp->base + RDR_PROC_PNTR(i));
	writel(0, cryp->base + RDR_DMA_CFG(i));

	/* Configure RDR host address space (32-bit DMA: high word is 0) */
	writel(0, cryp->base + RDR_BASE_ADDR_HI(i));
	writel(cryp->ring[i]->res_dma, cryp->base + RDR_BASE_ADDR_LO(i));

	writel(MTK_DESC_RING_SZ, cryp->base + RDR_RING_SIZE(i));
	writel(MTK_RDR_STAT_CLR, cryp->base + RDR_STAT(i));

	/*
	 * RDR manager generates update interrupts on a per-completed-packet,
	 * and the rd_proc_thresh_irq interrupt is fired when proc_pkt_count
	 * for the RDR exceeds the number of packets.
	 */
	writel(MTK_RDR_PROC_THRESH | MTK_RDR_PROC_MODE,
	       cryp->base + RDR_THRESH(i));

	/*
	 * Configure a threshold and time-out value for the processed
	 * result descriptors (or complete packets) that are written to
	 * the RDR.
	 */
	writel(MTK_DESC_SIZE(MTK_DESC_SZ) | MTK_DESC_OFFSET(MTK_DESC_OFF),
	       cryp->base + RDR_DESC_SIZE(i));

	/*
	 * Configure HIA fetch size and fetch threshold that are used to
	 * fetch blocks of multiple descriptors.
	 */
	writel(MTK_DESC_FETCH_SIZE(count * MTK_DESC_OFF) |
	       MTK_DESC_FETCH_THRESH(count * rndup) |
	       MTK_DESC_OVL_IRQ_EN,
	       cryp->base + RDR_CFG(i));
}
294*4882a593Smuzhiyun 
mtk_packet_engine_setup(struct mtk_cryp * cryp)295*4882a593Smuzhiyun static int mtk_packet_engine_setup(struct mtk_cryp *cryp)
296*4882a593Smuzhiyun {
297*4882a593Smuzhiyun 	struct mtk_sys_cap cap;
298*4882a593Smuzhiyun 	int i, err;
299*4882a593Smuzhiyun 	u32 val;
300*4882a593Smuzhiyun 
301*4882a593Smuzhiyun 	cap.hia_ver = readl(cryp->base + HIA_VERSION);
302*4882a593Smuzhiyun 	cap.hia_opt = readl(cryp->base + HIA_OPTIONS);
303*4882a593Smuzhiyun 	cap.hw_opt = readl(cryp->base + EIP97_OPTIONS);
304*4882a593Smuzhiyun 
305*4882a593Smuzhiyun 	if (!(((u16)cap.hia_ver) == MTK_HIA_SIGNATURE))
306*4882a593Smuzhiyun 		return -EINVAL;
307*4882a593Smuzhiyun 
308*4882a593Smuzhiyun 	/* Configure endianness conversion method for master (DMA) interface */
309*4882a593Smuzhiyun 	writel(0, cryp->base + EIP97_MST_CTRL);
310*4882a593Smuzhiyun 
311*4882a593Smuzhiyun 	/* Set HIA burst size */
312*4882a593Smuzhiyun 	val = readl(cryp->base + HIA_MST_CTRL);
313*4882a593Smuzhiyun 	val &= ~MTK_BURST_SIZE_MSK;
314*4882a593Smuzhiyun 	val |= MTK_BURST_SIZE(5);
315*4882a593Smuzhiyun 	writel(val, cryp->base + HIA_MST_CTRL);
316*4882a593Smuzhiyun 
317*4882a593Smuzhiyun 	err = mtk_dfe_dse_reset(cryp);
318*4882a593Smuzhiyun 	if (err) {
319*4882a593Smuzhiyun 		dev_err(cryp->dev, "Failed to reset DFE and DSE.\n");
320*4882a593Smuzhiyun 		return err;
321*4882a593Smuzhiyun 	}
322*4882a593Smuzhiyun 
323*4882a593Smuzhiyun 	mtk_dfe_dse_buf_setup(cryp, &cap);
324*4882a593Smuzhiyun 
325*4882a593Smuzhiyun 	/* Enable the 4 rings for the packet engines. */
326*4882a593Smuzhiyun 	mtk_desc_ring_link(cryp, 0xf);
327*4882a593Smuzhiyun 
328*4882a593Smuzhiyun 	for (i = 0; i < MTK_RING_MAX; i++) {
329*4882a593Smuzhiyun 		mtk_cmd_desc_ring_setup(cryp, i, &cap);
330*4882a593Smuzhiyun 		mtk_res_desc_ring_setup(cryp, i, &cap);
331*4882a593Smuzhiyun 	}
332*4882a593Smuzhiyun 
333*4882a593Smuzhiyun 	writel(MTK_PE_TK_LOC_AVL | MTK_PE_PROC_HELD | MTK_PE_TK_TIMEOUT_EN,
334*4882a593Smuzhiyun 	       cryp->base + PE_TOKEN_CTRL_STAT);
335*4882a593Smuzhiyun 
336*4882a593Smuzhiyun 	/* Clear all pending interrupts */
337*4882a593Smuzhiyun 	writel(MTK_AIC_G_CLR, cryp->base + AIC_G_ACK);
338*4882a593Smuzhiyun 	writel(MTK_PE_INPUT_DMA_ERR | MTK_PE_OUTPUT_DMA_ERR |
339*4882a593Smuzhiyun 	       MTK_PE_PKT_PORC_ERR | MTK_PE_PKT_TIMEOUT |
340*4882a593Smuzhiyun 	       MTK_PE_FATAL_ERR | MTK_PE_INPUT_DMA_ERR_EN |
341*4882a593Smuzhiyun 	       MTK_PE_OUTPUT_DMA_ERR_EN | MTK_PE_PKT_PORC_ERR_EN |
342*4882a593Smuzhiyun 	       MTK_PE_PKT_TIMEOUT_EN | MTK_PE_FATAL_ERR_EN |
343*4882a593Smuzhiyun 	       MTK_PE_INT_OUT_EN,
344*4882a593Smuzhiyun 	       cryp->base + PE_INTERRUPT_CTRL_STAT);
345*4882a593Smuzhiyun 
346*4882a593Smuzhiyun 	return 0;
347*4882a593Smuzhiyun }
348*4882a593Smuzhiyun 
mtk_aic_cap_check(struct mtk_cryp * cryp,int hw)349*4882a593Smuzhiyun static int mtk_aic_cap_check(struct mtk_cryp *cryp, int hw)
350*4882a593Smuzhiyun {
351*4882a593Smuzhiyun 	u32 val;
352*4882a593Smuzhiyun 
353*4882a593Smuzhiyun 	if (hw == MTK_RING_MAX)
354*4882a593Smuzhiyun 		val = readl(cryp->base + AIC_G_VERSION);
355*4882a593Smuzhiyun 	else
356*4882a593Smuzhiyun 		val = readl(cryp->base + AIC_VERSION(hw));
357*4882a593Smuzhiyun 
358*4882a593Smuzhiyun 	val &= MTK_AIC_VER_MSK;
359*4882a593Smuzhiyun 	if (val != MTK_AIC_VER11 && val != MTK_AIC_VER12)
360*4882a593Smuzhiyun 		return -ENXIO;
361*4882a593Smuzhiyun 
362*4882a593Smuzhiyun 	if (hw == MTK_RING_MAX)
363*4882a593Smuzhiyun 		val = readl(cryp->base + AIC_G_OPTIONS);
364*4882a593Smuzhiyun 	else
365*4882a593Smuzhiyun 		val = readl(cryp->base + AIC_OPTIONS(hw));
366*4882a593Smuzhiyun 
367*4882a593Smuzhiyun 	val &= MTK_AIC_INT_MSK;
368*4882a593Smuzhiyun 	if (!val || val > 32)
369*4882a593Smuzhiyun 		return -ENXIO;
370*4882a593Smuzhiyun 
371*4882a593Smuzhiyun 	return 0;
372*4882a593Smuzhiyun }
373*4882a593Smuzhiyun 
mtk_aic_init(struct mtk_cryp * cryp,int hw)374*4882a593Smuzhiyun static int mtk_aic_init(struct mtk_cryp *cryp, int hw)
375*4882a593Smuzhiyun {
376*4882a593Smuzhiyun 	int err;
377*4882a593Smuzhiyun 
378*4882a593Smuzhiyun 	err = mtk_aic_cap_check(cryp, hw);
379*4882a593Smuzhiyun 	if (err)
380*4882a593Smuzhiyun 		return err;
381*4882a593Smuzhiyun 
382*4882a593Smuzhiyun 	/* Disable all interrupts and set initial configuration */
383*4882a593Smuzhiyun 	if (hw == MTK_RING_MAX) {
384*4882a593Smuzhiyun 		writel(0, cryp->base + AIC_G_ENABLE_CTRL);
385*4882a593Smuzhiyun 		writel(0, cryp->base + AIC_G_POL_CTRL);
386*4882a593Smuzhiyun 		writel(0, cryp->base + AIC_G_TYPE_CTRL);
387*4882a593Smuzhiyun 		writel(0, cryp->base + AIC_G_ENABLE_SET);
388*4882a593Smuzhiyun 	} else {
389*4882a593Smuzhiyun 		writel(0, cryp->base + AIC_ENABLE_CTRL(hw));
390*4882a593Smuzhiyun 		writel(0, cryp->base + AIC_POL_CTRL(hw));
391*4882a593Smuzhiyun 		writel(0, cryp->base + AIC_TYPE_CTRL(hw));
392*4882a593Smuzhiyun 		writel(0, cryp->base + AIC_ENABLE_SET(hw));
393*4882a593Smuzhiyun 	}
394*4882a593Smuzhiyun 
395*4882a593Smuzhiyun 	return 0;
396*4882a593Smuzhiyun }
397*4882a593Smuzhiyun 
mtk_accelerator_init(struct mtk_cryp * cryp)398*4882a593Smuzhiyun static int mtk_accelerator_init(struct mtk_cryp *cryp)
399*4882a593Smuzhiyun {
400*4882a593Smuzhiyun 	int i, err;
401*4882a593Smuzhiyun 
402*4882a593Smuzhiyun 	/* Initialize advanced interrupt controller(AIC) */
403*4882a593Smuzhiyun 	for (i = 0; i < MTK_IRQ_NUM; i++) {
404*4882a593Smuzhiyun 		err = mtk_aic_init(cryp, i);
405*4882a593Smuzhiyun 		if (err) {
406*4882a593Smuzhiyun 			dev_err(cryp->dev, "Failed to initialize AIC.\n");
407*4882a593Smuzhiyun 			return err;
408*4882a593Smuzhiyun 		}
409*4882a593Smuzhiyun 	}
410*4882a593Smuzhiyun 
411*4882a593Smuzhiyun 	/* Initialize packet engine */
412*4882a593Smuzhiyun 	err = mtk_packet_engine_setup(cryp);
413*4882a593Smuzhiyun 	if (err) {
414*4882a593Smuzhiyun 		dev_err(cryp->dev, "Failed to configure packet engine.\n");
415*4882a593Smuzhiyun 		return err;
416*4882a593Smuzhiyun 	}
417*4882a593Smuzhiyun 
418*4882a593Smuzhiyun 	return 0;
419*4882a593Smuzhiyun }
420*4882a593Smuzhiyun 
mtk_desc_dma_free(struct mtk_cryp * cryp)421*4882a593Smuzhiyun static void mtk_desc_dma_free(struct mtk_cryp *cryp)
422*4882a593Smuzhiyun {
423*4882a593Smuzhiyun 	int i;
424*4882a593Smuzhiyun 
425*4882a593Smuzhiyun 	for (i = 0; i < MTK_RING_MAX; i++) {
426*4882a593Smuzhiyun 		dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ,
427*4882a593Smuzhiyun 				  cryp->ring[i]->res_base,
428*4882a593Smuzhiyun 				  cryp->ring[i]->res_dma);
429*4882a593Smuzhiyun 		dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ,
430*4882a593Smuzhiyun 				  cryp->ring[i]->cmd_base,
431*4882a593Smuzhiyun 				  cryp->ring[i]->cmd_dma);
432*4882a593Smuzhiyun 		kfree(cryp->ring[i]);
433*4882a593Smuzhiyun 	}
434*4882a593Smuzhiyun }
435*4882a593Smuzhiyun 
mtk_desc_ring_alloc(struct mtk_cryp * cryp)436*4882a593Smuzhiyun static int mtk_desc_ring_alloc(struct mtk_cryp *cryp)
437*4882a593Smuzhiyun {
438*4882a593Smuzhiyun 	struct mtk_ring **ring = cryp->ring;
439*4882a593Smuzhiyun 	int i;
440*4882a593Smuzhiyun 
441*4882a593Smuzhiyun 	for (i = 0; i < MTK_RING_MAX; i++) {
442*4882a593Smuzhiyun 		ring[i] = kzalloc(sizeof(**ring), GFP_KERNEL);
443*4882a593Smuzhiyun 		if (!ring[i])
444*4882a593Smuzhiyun 			goto err_cleanup;
445*4882a593Smuzhiyun 
446*4882a593Smuzhiyun 		ring[i]->cmd_base = dma_alloc_coherent(cryp->dev,
447*4882a593Smuzhiyun 						       MTK_DESC_RING_SZ,
448*4882a593Smuzhiyun 						       &ring[i]->cmd_dma,
449*4882a593Smuzhiyun 						       GFP_KERNEL);
450*4882a593Smuzhiyun 		if (!ring[i]->cmd_base)
451*4882a593Smuzhiyun 			goto err_cleanup;
452*4882a593Smuzhiyun 
453*4882a593Smuzhiyun 		ring[i]->res_base = dma_alloc_coherent(cryp->dev,
454*4882a593Smuzhiyun 						       MTK_DESC_RING_SZ,
455*4882a593Smuzhiyun 						       &ring[i]->res_dma,
456*4882a593Smuzhiyun 						       GFP_KERNEL);
457*4882a593Smuzhiyun 		if (!ring[i]->res_base)
458*4882a593Smuzhiyun 			goto err_cleanup;
459*4882a593Smuzhiyun 
460*4882a593Smuzhiyun 		ring[i]->cmd_next = ring[i]->cmd_base;
461*4882a593Smuzhiyun 		ring[i]->res_next = ring[i]->res_base;
462*4882a593Smuzhiyun 	}
463*4882a593Smuzhiyun 	return 0;
464*4882a593Smuzhiyun 
465*4882a593Smuzhiyun err_cleanup:
466*4882a593Smuzhiyun 	do {
467*4882a593Smuzhiyun 		dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ,
468*4882a593Smuzhiyun 				  ring[i]->res_base, ring[i]->res_dma);
469*4882a593Smuzhiyun 		dma_free_coherent(cryp->dev, MTK_DESC_RING_SZ,
470*4882a593Smuzhiyun 				  ring[i]->cmd_base, ring[i]->cmd_dma);
471*4882a593Smuzhiyun 		kfree(ring[i]);
472*4882a593Smuzhiyun 	} while (i--);
473*4882a593Smuzhiyun 	return -ENOMEM;
474*4882a593Smuzhiyun }
475*4882a593Smuzhiyun 
mtk_crypto_probe(struct platform_device * pdev)476*4882a593Smuzhiyun static int mtk_crypto_probe(struct platform_device *pdev)
477*4882a593Smuzhiyun {
478*4882a593Smuzhiyun 	struct mtk_cryp *cryp;
479*4882a593Smuzhiyun 	int i, err;
480*4882a593Smuzhiyun 
481*4882a593Smuzhiyun 	cryp = devm_kzalloc(&pdev->dev, sizeof(*cryp), GFP_KERNEL);
482*4882a593Smuzhiyun 	if (!cryp)
483*4882a593Smuzhiyun 		return -ENOMEM;
484*4882a593Smuzhiyun 
485*4882a593Smuzhiyun 	cryp->base = devm_platform_ioremap_resource(pdev, 0);
486*4882a593Smuzhiyun 	if (IS_ERR(cryp->base))
487*4882a593Smuzhiyun 		return PTR_ERR(cryp->base);
488*4882a593Smuzhiyun 
489*4882a593Smuzhiyun 	for (i = 0; i < MTK_IRQ_NUM; i++) {
490*4882a593Smuzhiyun 		cryp->irq[i] = platform_get_irq(pdev, i);
491*4882a593Smuzhiyun 		if (cryp->irq[i] < 0)
492*4882a593Smuzhiyun 			return cryp->irq[i];
493*4882a593Smuzhiyun 	}
494*4882a593Smuzhiyun 
495*4882a593Smuzhiyun 	cryp->clk_cryp = devm_clk_get(&pdev->dev, "cryp");
496*4882a593Smuzhiyun 	if (IS_ERR(cryp->clk_cryp))
497*4882a593Smuzhiyun 		return -EPROBE_DEFER;
498*4882a593Smuzhiyun 
499*4882a593Smuzhiyun 	cryp->dev = &pdev->dev;
500*4882a593Smuzhiyun 	pm_runtime_enable(cryp->dev);
501*4882a593Smuzhiyun 	pm_runtime_get_sync(cryp->dev);
502*4882a593Smuzhiyun 
503*4882a593Smuzhiyun 	err = clk_prepare_enable(cryp->clk_cryp);
504*4882a593Smuzhiyun 	if (err)
505*4882a593Smuzhiyun 		goto err_clk_cryp;
506*4882a593Smuzhiyun 
507*4882a593Smuzhiyun 	/* Allocate four command/result descriptor rings */
508*4882a593Smuzhiyun 	err = mtk_desc_ring_alloc(cryp);
509*4882a593Smuzhiyun 	if (err) {
510*4882a593Smuzhiyun 		dev_err(cryp->dev, "Unable to allocate descriptor rings.\n");
511*4882a593Smuzhiyun 		goto err_resource;
512*4882a593Smuzhiyun 	}
513*4882a593Smuzhiyun 
514*4882a593Smuzhiyun 	/* Initialize hardware modules */
515*4882a593Smuzhiyun 	err = mtk_accelerator_init(cryp);
516*4882a593Smuzhiyun 	if (err) {
517*4882a593Smuzhiyun 		dev_err(cryp->dev, "Failed to initialize cryptographic engine.\n");
518*4882a593Smuzhiyun 		goto err_engine;
519*4882a593Smuzhiyun 	}
520*4882a593Smuzhiyun 
521*4882a593Smuzhiyun 	err = mtk_cipher_alg_register(cryp);
522*4882a593Smuzhiyun 	if (err) {
523*4882a593Smuzhiyun 		dev_err(cryp->dev, "Unable to register cipher algorithm.\n");
524*4882a593Smuzhiyun 		goto err_cipher;
525*4882a593Smuzhiyun 	}
526*4882a593Smuzhiyun 
527*4882a593Smuzhiyun 	err = mtk_hash_alg_register(cryp);
528*4882a593Smuzhiyun 	if (err) {
529*4882a593Smuzhiyun 		dev_err(cryp->dev, "Unable to register hash algorithm.\n");
530*4882a593Smuzhiyun 		goto err_hash;
531*4882a593Smuzhiyun 	}
532*4882a593Smuzhiyun 
533*4882a593Smuzhiyun 	platform_set_drvdata(pdev, cryp);
534*4882a593Smuzhiyun 	return 0;
535*4882a593Smuzhiyun 
536*4882a593Smuzhiyun err_hash:
537*4882a593Smuzhiyun 	mtk_cipher_alg_release(cryp);
538*4882a593Smuzhiyun err_cipher:
539*4882a593Smuzhiyun 	mtk_dfe_dse_reset(cryp);
540*4882a593Smuzhiyun err_engine:
541*4882a593Smuzhiyun 	mtk_desc_dma_free(cryp);
542*4882a593Smuzhiyun err_resource:
543*4882a593Smuzhiyun 	clk_disable_unprepare(cryp->clk_cryp);
544*4882a593Smuzhiyun err_clk_cryp:
545*4882a593Smuzhiyun 	pm_runtime_put_sync(cryp->dev);
546*4882a593Smuzhiyun 	pm_runtime_disable(cryp->dev);
547*4882a593Smuzhiyun 
548*4882a593Smuzhiyun 	return err;
549*4882a593Smuzhiyun }
550*4882a593Smuzhiyun 
/*
 * Remove: tear down in reverse order of probe — unregister algorithms,
 * free descriptor ring DMA memory, gate the clock, and drop the
 * runtime PM reference taken at probe time.
 */
static int mtk_crypto_remove(struct platform_device *pdev)
{
	struct mtk_cryp *cryp = platform_get_drvdata(pdev);

	mtk_hash_alg_release(cryp);
	mtk_cipher_alg_release(cryp);
	mtk_desc_dma_free(cryp);

	clk_disable_unprepare(cryp->clk_cryp);

	pm_runtime_put_sync(cryp->dev);
	pm_runtime_disable(cryp->dev);
	platform_set_drvdata(pdev, NULL);

	return 0;
}
567*4882a593Smuzhiyun 
/* Device-tree match table: binds this driver to the EIP97 node */
static const struct of_device_id of_crypto_id[] = {
	{ .compatible = "mediatek,eip97-crypto" },
	{},
};
MODULE_DEVICE_TABLE(of, of_crypto_id);

static struct platform_driver mtk_crypto_driver = {
	.probe = mtk_crypto_probe,
	.remove = mtk_crypto_remove,
	.driver = {
		   .name = "mtk-crypto",
		   .of_match_table = of_crypto_id,
	},
};
module_platform_driver(mtk_crypto_driver);

MODULE_LICENSE("GPL");
MODULE_AUTHOR("Ryder Lee <ryder.lee@mediatek.com>");
MODULE_DESCRIPTION("Cryptographic accelerator driver for EIP97");
587