// SPDX-License-Identifier: GPL-2.0
//
// Cryptographic API.
//
// Support for Samsung S5PV210 and Exynos HW acceleration.
//
// Copyright (C) 2011 NetUP Inc. All rights reserved.
// Copyright (c) 2017 Samsung Electronics Co., Ltd. All rights reserved.
//
// Hash part based on omap-sham.c driver.

#include <linux/clk.h>
#include <linux/crypto.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/scatterlist.h>

#include <crypto/ctr.h>
#include <crypto/aes.h>
#include <crypto/algapi.h>
#include <crypto/scatterwalk.h>

#include <crypto/hash.h>
#include <crypto/md5.h>
#include <crypto/sha.h>
#include <crypto/internal/hash.h>

#define _SBF(s, v) ((v) << (s))
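
/*
 * For illustration: _SBF ("shift bit field") builds a register field value
 * from a shift and a raw value, e.g. _SBF(4, 0x02) == 0x20, which is how
 * SSS_AES_KEY_SIZE_256 below selects the 256-bit key size in bits [5:4]
 * of the AES control register.
 */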

/* Feed control registers */
#define SSS_REG_FCINTSTAT 0x0000
#define SSS_FCINTSTAT_HPARTINT BIT(7)
#define SSS_FCINTSTAT_HDONEINT BIT(5)
#define SSS_FCINTSTAT_BRDMAINT BIT(3)
#define SSS_FCINTSTAT_BTDMAINT BIT(2)
#define SSS_FCINTSTAT_HRDMAINT BIT(1)
#define SSS_FCINTSTAT_PKDMAINT BIT(0)

#define SSS_REG_FCINTENSET 0x0004
#define SSS_FCINTENSET_HPARTINTENSET BIT(7)
#define SSS_FCINTENSET_HDONEINTENSET BIT(5)
#define SSS_FCINTENSET_BRDMAINTENSET BIT(3)
#define SSS_FCINTENSET_BTDMAINTENSET BIT(2)
#define SSS_FCINTENSET_HRDMAINTENSET BIT(1)
#define SSS_FCINTENSET_PKDMAINTENSET BIT(0)

#define SSS_REG_FCINTENCLR 0x0008
#define SSS_FCINTENCLR_HPARTINTENCLR BIT(7)
#define SSS_FCINTENCLR_HDONEINTENCLR BIT(5)
#define SSS_FCINTENCLR_BRDMAINTENCLR BIT(3)
#define SSS_FCINTENCLR_BTDMAINTENCLR BIT(2)
#define SSS_FCINTENCLR_HRDMAINTENCLR BIT(1)
#define SSS_FCINTENCLR_PKDMAINTENCLR BIT(0)

#define SSS_REG_FCINTPEND 0x000C
#define SSS_FCINTPEND_HPARTINTP BIT(7)
#define SSS_FCINTPEND_HDONEINTP BIT(5)
#define SSS_FCINTPEND_BRDMAINTP BIT(3)
#define SSS_FCINTPEND_BTDMAINTP BIT(2)
#define SSS_FCINTPEND_HRDMAINTP BIT(1)
#define SSS_FCINTPEND_PKDMAINTP BIT(0)

#define SSS_REG_FCFIFOSTAT 0x0010
#define SSS_FCFIFOSTAT_BRFIFOFUL BIT(7)
#define SSS_FCFIFOSTAT_BRFIFOEMP BIT(6)
#define SSS_FCFIFOSTAT_BTFIFOFUL BIT(5)
#define SSS_FCFIFOSTAT_BTFIFOEMP BIT(4)
#define SSS_FCFIFOSTAT_HRFIFOFUL BIT(3)
#define SSS_FCFIFOSTAT_HRFIFOEMP BIT(2)
#define SSS_FCFIFOSTAT_PKFIFOFUL BIT(1)
#define SSS_FCFIFOSTAT_PKFIFOEMP BIT(0)

#define SSS_REG_FCFIFOCTRL 0x0014
#define SSS_FCFIFOCTRL_DESSEL BIT(2)
#define SSS_HASHIN_INDEPENDENT _SBF(0, 0x00)
#define SSS_HASHIN_CIPHER_INPUT _SBF(0, 0x01)
#define SSS_HASHIN_CIPHER_OUTPUT _SBF(0, 0x02)
#define SSS_HASHIN_MASK _SBF(0, 0x03)

#define SSS_REG_FCBRDMAS 0x0020
#define SSS_REG_FCBRDMAL 0x0024
#define SSS_REG_FCBRDMAC 0x0028
#define SSS_FCBRDMAC_BYTESWAP BIT(1)
#define SSS_FCBRDMAC_FLUSH BIT(0)

#define SSS_REG_FCBTDMAS 0x0030
#define SSS_REG_FCBTDMAL 0x0034
#define SSS_REG_FCBTDMAC 0x0038
#define SSS_FCBTDMAC_BYTESWAP BIT(1)
#define SSS_FCBTDMAC_FLUSH BIT(0)

#define SSS_REG_FCHRDMAS 0x0040
#define SSS_REG_FCHRDMAL 0x0044
#define SSS_REG_FCHRDMAC 0x0048
#define SSS_FCHRDMAC_BYTESWAP BIT(1)
#define SSS_FCHRDMAC_FLUSH BIT(0)

#define SSS_REG_FCPKDMAS 0x0050
#define SSS_REG_FCPKDMAL 0x0054
#define SSS_REG_FCPKDMAC 0x0058
#define SSS_FCPKDMAC_BYTESWAP BIT(3)
#define SSS_FCPKDMAC_DESCEND BIT(2)
#define SSS_FCPKDMAC_TRANSMIT BIT(1)
#define SSS_FCPKDMAC_FLUSH BIT(0)

#define SSS_REG_FCPKDMAO 0x005C

/* AES registers */
#define SSS_REG_AES_CONTROL 0x00
#define SSS_AES_BYTESWAP_DI BIT(11)
#define SSS_AES_BYTESWAP_DO BIT(10)
#define SSS_AES_BYTESWAP_IV BIT(9)
#define SSS_AES_BYTESWAP_CNT BIT(8)
#define SSS_AES_BYTESWAP_KEY BIT(7)
#define SSS_AES_KEY_CHANGE_MODE BIT(6)
#define SSS_AES_KEY_SIZE_128 _SBF(4, 0x00)
#define SSS_AES_KEY_SIZE_192 _SBF(4, 0x01)
#define SSS_AES_KEY_SIZE_256 _SBF(4, 0x02)
#define SSS_AES_FIFO_MODE BIT(3)
#define SSS_AES_CHAIN_MODE_ECB _SBF(1, 0x00)
#define SSS_AES_CHAIN_MODE_CBC _SBF(1, 0x01)
#define SSS_AES_CHAIN_MODE_CTR _SBF(1, 0x02)
#define SSS_AES_MODE_DECRYPT BIT(0)

#define SSS_REG_AES_STATUS 0x04
#define SSS_AES_BUSY BIT(2)
#define SSS_AES_INPUT_READY BIT(1)
#define SSS_AES_OUTPUT_READY BIT(0)

#define SSS_REG_AES_IN_DATA(s) (0x10 + ((s) << 2))
#define SSS_REG_AES_OUT_DATA(s) (0x20 + ((s) << 2))
#define SSS_REG_AES_IV_DATA(s) (0x30 + ((s) << 2))
#define SSS_REG_AES_CNT_DATA(s) (0x40 + ((s) << 2))
#define SSS_REG_AES_KEY_DATA(s) (0x80 + ((s) << 2))

#define SSS_REG(dev, reg) ((dev)->ioaddr + (SSS_REG_##reg))
#define SSS_READ(dev, reg) __raw_readl(SSS_REG(dev, reg))
#define SSS_WRITE(dev, reg, val) __raw_writel((val), SSS_REG(dev, reg))

#define SSS_AES_REG(dev, reg) ((dev)->aes_ioaddr + SSS_REG_##reg)
#define SSS_AES_WRITE(dev, reg, val) __raw_writel((val), \
						SSS_AES_REG(dev, reg))
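
/*
 * For illustration: the token-pasting accessors above turn a register name
 * into an offset at compile time, e.g. SSS_WRITE(dev, FCINTPEND, st_bits)
 * expands to __raw_writel(st_bits, dev->ioaddr + SSS_REG_FCINTPEND).
 */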

/* HW engine modes */
#define FLAGS_AES_DECRYPT BIT(0)
#define FLAGS_AES_MODE_MASK _SBF(1, 0x03)
#define FLAGS_AES_CBC _SBF(1, 0x01)
#define FLAGS_AES_CTR _SBF(1, 0x02)

#define AES_KEY_LEN 16
#define CRYPTO_QUEUE_LEN 1

/* HASH registers */
#define SSS_REG_HASH_CTRL 0x00

#define SSS_HASH_USER_IV_EN BIT(5)
#define SSS_HASH_INIT_BIT BIT(4)
#define SSS_HASH_ENGINE_SHA1 _SBF(1, 0x00)
#define SSS_HASH_ENGINE_MD5 _SBF(1, 0x01)
#define SSS_HASH_ENGINE_SHA256 _SBF(1, 0x02)

#define SSS_HASH_ENGINE_MASK _SBF(1, 0x03)

#define SSS_REG_HASH_CTRL_PAUSE 0x04

#define SSS_HASH_PAUSE BIT(0)

#define SSS_REG_HASH_CTRL_FIFO 0x08

#define SSS_HASH_FIFO_MODE_DMA BIT(0)
#define SSS_HASH_FIFO_MODE_CPU 0

#define SSS_REG_HASH_CTRL_SWAP 0x0C

#define SSS_HASH_BYTESWAP_DI BIT(3)
#define SSS_HASH_BYTESWAP_DO BIT(2)
#define SSS_HASH_BYTESWAP_IV BIT(1)
#define SSS_HASH_BYTESWAP_KEY BIT(0)

#define SSS_REG_HASH_STATUS 0x10

#define SSS_HASH_STATUS_MSG_DONE BIT(6)
#define SSS_HASH_STATUS_PARTIAL_DONE BIT(4)
#define SSS_HASH_STATUS_BUFFER_READY BIT(0)

#define SSS_REG_HASH_MSG_SIZE_LOW 0x20
#define SSS_REG_HASH_MSG_SIZE_HIGH 0x24

#define SSS_REG_HASH_PRE_MSG_SIZE_LOW 0x28
#define SSS_REG_HASH_PRE_MSG_SIZE_HIGH 0x2C

#define SSS_REG_HASH_IV(s) (0xB0 + ((s) << 2))
#define SSS_REG_HASH_OUT(s) (0x100 + ((s) << 2))

#define HASH_BLOCK_SIZE 64
#define HASH_REG_SIZEOF 4
#define HASH_MD5_MAX_REG (MD5_DIGEST_SIZE / HASH_REG_SIZEOF)
#define HASH_SHA1_MAX_REG (SHA1_DIGEST_SIZE / HASH_REG_SIZEOF)
#define HASH_SHA256_MAX_REG (SHA256_DIGEST_SIZE / HASH_REG_SIZEOF)

/*
 * HASH bit numbers, used by device. They are set in dev->hash_flags with
 * set_bit() and clear_bit() and tested with test_bit() or BIT(), to keep
 * the HASH state BUSY or FREE, or to signal state from the irq_handler
 * to the hash_tasklet. The SGS bits track memory allocated for the
 * scatterlist.
 */
#define HASH_FLAGS_BUSY 0
#define HASH_FLAGS_FINAL 1
#define HASH_FLAGS_DMA_ACTIVE 2
#define HASH_FLAGS_OUTPUT_READY 3
#define HASH_FLAGS_DMA_READY 4
#define HASH_FLAGS_SGS_COPIED 5
#define HASH_FLAGS_SGS_ALLOCED 6

/* HASH HW constants */
#define BUFLEN HASH_BLOCK_SIZE

#define SSS_HASH_DMA_LEN_ALIGN 8
#define SSS_HASH_DMA_ALIGN_MASK (SSS_HASH_DMA_LEN_ALIGN - 1)
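
/*
 * For illustration (an assumption about the intended use of the mask, not
 * original code): a length is DMA-friendly for the HASH block when
 * (len & SSS_HASH_DMA_ALIGN_MASK) == 0, i.e. when it is a multiple of the
 * 8-byte SSS_HASH_DMA_LEN_ALIGN.
 */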

#define SSS_HASH_QUEUE_LENGTH 10

/**
 * struct samsung_aes_variant - platform specific SSS driver data
 * @aes_offset: AES register offset from SSS module's base.
 * @hash_offset: HASH register offset from SSS module's base.
 * @clk_names: names of clocks needed to run SSS IP
 *
 * Specifies platform specific configuration of SSS module.
 * Note: A dedicated structure is used for the driver-specific platform
 * data to allow future expansion.
 */
struct samsung_aes_variant {
	unsigned int aes_offset;
	unsigned int hash_offset;
	const char *clk_names[2];
};

struct s5p_aes_reqctx {
	unsigned long mode;
};

struct s5p_aes_ctx {
	struct s5p_aes_dev *dev;

	u8 aes_key[AES_MAX_KEY_SIZE];
	u8 nonce[CTR_RFC3686_NONCE_SIZE];
	int keylen;
};

/**
 * struct s5p_aes_dev - Crypto device state container
 * @dev: Associated device
 * @clk: Clock for accessing hardware
 * @pclk: APB bus clock necessary to access the hardware
 * @ioaddr: Mapped IO memory region
 * @aes_ioaddr: Per-variant offset for AES block IO memory
 * @irq_fc: Feed control interrupt line
 * @req: Crypto request currently handled by the device
 * @ctx: Configuration for currently handled crypto request
 * @sg_src: Scatter list with source data for currently handled block
 *	in device. This is DMA-mapped into device.
 * @sg_dst: Scatter list with destination data for currently handled block
 *	in device. This is DMA-mapped into device.
 * @sg_src_cpy: In case of unaligned access, copied scatter list
 *	with source data.
 * @sg_dst_cpy: In case of unaligned access, copied scatter list
 *	with destination data.
 * @tasklet: New request scheduling job
 * @queue: Crypto queue
 * @busy: Indicates whether the device is currently handling some request
 *	thus it uses some of the fields from this state, like:
 *	req, ctx, sg_src/dst (and copies). This essentially
 *	protects against concurrent access to these fields.
 * @lock: Lock for protecting both access to device hardware registers
 *	and fields related to current request (including the busy field).
 * @res: Resources for hash.
 * @io_hash_base: Per-variant offset for HASH block IO memory.
 * @hash_lock: Lock for protecting hash_req, hash_queue and hash_flags
 *	variable.
 * @hash_flags: Flags for current HASH op.
 * @hash_queue: Async hash queue.
 * @hash_tasklet: New HASH request scheduling job.
 * @xmit_buf: Buffer for current HASH request transfer into SSS block.
 * @hash_req: Current request sending to SSS HASH block.
 * @hash_sg_iter: Scatterlist transferred through DMA into SSS HASH block.
 * @hash_sg_cnt: Counter for hash_sg_iter.
 *
 * @use_hash: true if HASH algs enabled
 */
struct s5p_aes_dev {
	struct device *dev;
	struct clk *clk;
	struct clk *pclk;
	void __iomem *ioaddr;
	void __iomem *aes_ioaddr;
	int irq_fc;

	struct skcipher_request *req;
	struct s5p_aes_ctx *ctx;
	struct scatterlist *sg_src;
	struct scatterlist *sg_dst;

	struct scatterlist *sg_src_cpy;
	struct scatterlist *sg_dst_cpy;

	struct tasklet_struct tasklet;
	struct crypto_queue queue;
	bool busy;
	spinlock_t lock;

	struct resource *res;
	void __iomem *io_hash_base;

	spinlock_t hash_lock; /* protect hash_ vars */
	unsigned long hash_flags;
	struct crypto_queue hash_queue;
	struct tasklet_struct hash_tasklet;

	u8 xmit_buf[BUFLEN];
	struct ahash_request *hash_req;
	struct scatterlist *hash_sg_iter;
	unsigned int hash_sg_cnt;

	bool use_hash;
};

/**
 * struct s5p_hash_reqctx - HASH request context
 * @dd: Associated device
 * @op_update: Current request operation (OP_UPDATE or OP_FINAL)
 * @digcnt: Number of bytes processed by HW (without buffer[] ones)
 * @digest: Digest message or IV for partial result
 * @nregs: Number of HW registers for digest or IV read/write
 * @engine: Bits for selecting type of HASH in SSS block
 * @sg: sg for DMA transfer
 * @sg_len: Length of sg for DMA transfer
 * @sgl: sg for joining buffer and req->src scatterlist
 * @skip: Skip offset in req->src for current op
 * @total: Total number of bytes for current request
 * @finup: Keep state for finup or final.
 * @error: Keep track of error.
 * @bufcnt: Number of bytes held in buffer[]
 * @buffer: For byte(s) from end of req->src in UPDATE op
 */
struct s5p_hash_reqctx {
	struct s5p_aes_dev *dd;
	bool op_update;

	u64 digcnt;
	u8 digest[SHA256_DIGEST_SIZE];

	unsigned int nregs; /* digest_size / sizeof(reg) */
	u32 engine;

	struct scatterlist *sg;
	unsigned int sg_len;
	struct scatterlist sgl[2];
	unsigned int skip;
	unsigned int total;
	bool finup;
	bool error;

	u32 bufcnt;
	u8 buffer[];
};

/**
 * struct s5p_hash_ctx - HASH transformation context
 * @dd: Associated device
 * @flags: Bits for algorithm HASH.
 * @fallback: Software transformation for zero message or size < BUFLEN.
 */
struct s5p_hash_ctx {
	struct s5p_aes_dev *dd;
	unsigned long flags;
	struct crypto_shash *fallback;
};

static const struct samsung_aes_variant s5p_aes_data = {
	.aes_offset = 0x4000,
	.hash_offset = 0x6000,
	.clk_names = { "secss", },
};

static const struct samsung_aes_variant exynos_aes_data = {
	.aes_offset = 0x200,
	.hash_offset = 0x400,
	.clk_names = { "secss", },
};

static const struct samsung_aes_variant exynos5433_slim_aes_data = {
	.aes_offset = 0x400,
	.hash_offset = 0x800,
	.clk_names = { "pclk", "aclk", },
};

static const struct of_device_id s5p_sss_dt_match[] = {
	{
		.compatible = "samsung,s5pv210-secss",
		.data = &s5p_aes_data,
	},
	{
		.compatible = "samsung,exynos4210-secss",
		.data = &exynos_aes_data,
	},
	{
		.compatible = "samsung,exynos5433-slim-sss",
		.data = &exynos5433_slim_aes_data,
	},
	{ },
};
MODULE_DEVICE_TABLE(of, s5p_sss_dt_match);

static inline const struct samsung_aes_variant *find_s5p_sss_version
				   (const struct platform_device *pdev)
{
	if (IS_ENABLED(CONFIG_OF) && (pdev->dev.of_node)) {
		const struct of_device_id *match;

		match = of_match_node(s5p_sss_dt_match,
				      pdev->dev.of_node);
		return (const struct samsung_aes_variant *)match->data;
	}
	return (const struct samsung_aes_variant *)
			platform_get_device_id(pdev)->driver_data;
}
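
/*
 * For illustration: on a DT-based board the variant comes from the matched
 * compatible string, e.g. "samsung,exynos5433-slim-sss" yields
 * exynos5433_slim_aes_data; on non-DT platforms it falls back to the
 * platform device ID table.
 */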

static struct s5p_aes_dev *s5p_dev;

static void s5p_set_dma_indata(struct s5p_aes_dev *dev,
			       const struct scatterlist *sg)
{
	SSS_WRITE(dev, FCBRDMAS, sg_dma_address(sg));
	SSS_WRITE(dev, FCBRDMAL, sg_dma_len(sg));
}

static void s5p_set_dma_outdata(struct s5p_aes_dev *dev,
				const struct scatterlist *sg)
{
	SSS_WRITE(dev, FCBTDMAS, sg_dma_address(sg));
	SSS_WRITE(dev, FCBTDMAL, sg_dma_len(sg));
}

static void s5p_free_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist **sg)
{
	int len;

	if (!*sg)
		return;

	len = ALIGN(dev->req->cryptlen, AES_BLOCK_SIZE);
	free_pages((unsigned long)sg_virt(*sg), get_order(len));

	kfree(*sg);
	*sg = NULL;
}

static void s5p_sg_copy_buf(void *buf, struct scatterlist *sg,
			    unsigned int nbytes, int out)
{
	struct scatter_walk walk;

	if (!nbytes)
		return;

	scatterwalk_start(&walk, sg);
	scatterwalk_copychunks(buf, &walk, nbytes, out);
	scatterwalk_done(&walk, out, 0);
}
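
/*
 * For illustration: the 'out' flag selects the copy direction, e.g.
 * s5p_sg_copy_buf(buf, sg, len, 0) gathers len bytes from the scatterlist
 * into buf, while out == 1 scatters buf back into the list (as
 * s5p_sg_done() below does for the output data).
 */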

static void s5p_sg_done(struct s5p_aes_dev *dev)
{
	struct skcipher_request *req = dev->req;
	struct s5p_aes_reqctx *reqctx = skcipher_request_ctx(req);

	if (dev->sg_dst_cpy) {
		dev_dbg(dev->dev,
			"Copying %d bytes of output data back to original place\n",
			dev->req->cryptlen);
		s5p_sg_copy_buf(sg_virt(dev->sg_dst_cpy), dev->req->dst,
				dev->req->cryptlen, 1);
	}
	s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
	s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
	if (reqctx->mode & FLAGS_AES_CBC)
		memcpy_fromio(req->iv, dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), AES_BLOCK_SIZE);
	else if (reqctx->mode & FLAGS_AES_CTR)
		memcpy_fromio(req->iv, dev->aes_ioaddr + SSS_REG_AES_CNT_DATA(0), AES_BLOCK_SIZE);
}

/* Calls the completion. Cannot be called with dev->lock held. */
static void s5p_aes_complete(struct skcipher_request *req, int err)
{
	req->base.complete(&req->base, err);
}

static void s5p_unset_outdata(struct s5p_aes_dev *dev)
{
	dma_unmap_sg(dev->dev, dev->sg_dst, 1, DMA_FROM_DEVICE);
}

static void s5p_unset_indata(struct s5p_aes_dev *dev)
{
	dma_unmap_sg(dev->dev, dev->sg_src, 1, DMA_TO_DEVICE);
}

static int s5p_make_sg_cpy(struct s5p_aes_dev *dev, struct scatterlist *src,
			   struct scatterlist **dst)
{
	void *pages;
	int len;

	*dst = kmalloc(sizeof(**dst), GFP_ATOMIC);
	if (!*dst)
		return -ENOMEM;

	len = ALIGN(dev->req->cryptlen, AES_BLOCK_SIZE);
	pages = (void *)__get_free_pages(GFP_ATOMIC, get_order(len));
	if (!pages) {
		kfree(*dst);
		*dst = NULL;
		return -ENOMEM;
	}

	s5p_sg_copy_buf(pages, src, dev->req->cryptlen, 0);

	sg_init_table(*dst, 1);
	sg_set_buf(*dst, pages, len);

	return 0;
}
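
/*
 * For illustration: s5p_make_sg_cpy() builds a bounce buffer when the
 * caller's scatterlist is not suitable for the FIFO DMA, e.g. a 20-byte
 * request is rounded up to ALIGN(20, AES_BLOCK_SIZE) == 32 bytes and
 * presented to the engine as a single contiguous entry.
 */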

static int s5p_set_outdata(struct s5p_aes_dev *dev, struct scatterlist *sg)
{
	if (!sg->length)
		return -EINVAL;

	if (!dma_map_sg(dev->dev, sg, 1, DMA_FROM_DEVICE))
		return -ENOMEM;

	dev->sg_dst = sg;

	return 0;
}

static int s5p_set_indata(struct s5p_aes_dev *dev, struct scatterlist *sg)
{
	if (!sg->length)
		return -EINVAL;

	if (!dma_map_sg(dev->dev, sg, 1, DMA_TO_DEVICE))
		return -ENOMEM;

	dev->sg_src = sg;

	return 0;
}

/*
 * Returns -ERRNO on error (mapping of new data failed).
 * On success returns:
 *  - 0 if there is no more data,
 *  - 1 if new transmitting (output) data is ready and its address+length
 *    have to be written to device (by calling s5p_set_dma_outdata()).
 */
static int s5p_aes_tx(struct s5p_aes_dev *dev)
{
	int ret = 0;

	s5p_unset_outdata(dev);

	if (!sg_is_last(dev->sg_dst)) {
		ret = s5p_set_outdata(dev, sg_next(dev->sg_dst));
		if (!ret)
			ret = 1;
	}

	return ret;
}

/*
 * Returns -ERRNO on error (mapping of new data failed).
 * On success returns:
 *  - 0 if there is no more data,
 *  - 1 if new receiving (input) data is ready and its address+length
 *    have to be written to device (by calling s5p_set_dma_indata()).
 */
static int s5p_aes_rx(struct s5p_aes_dev *dev/*, bool *set_dma*/)
{
	int ret = 0;

	s5p_unset_indata(dev);

	if (!sg_is_last(dev->sg_src)) {
		ret = s5p_set_indata(dev, sg_next(dev->sg_src));
		if (!ret)
			ret = 1;
	}

	return ret;
}

static inline u32 s5p_hash_read(struct s5p_aes_dev *dd, u32 offset)
{
	return __raw_readl(dd->io_hash_base + offset);
}

static inline void s5p_hash_write(struct s5p_aes_dev *dd,
				  u32 offset, u32 value)
{
	__raw_writel(value, dd->io_hash_base + offset);
}

/**
 * s5p_set_dma_hashdata() - start DMA with sg
 * @dev: device
 * @sg: scatterlist ready to DMA transmit
 */
static void s5p_set_dma_hashdata(struct s5p_aes_dev *dev,
				 const struct scatterlist *sg)
{
	dev->hash_sg_cnt--;
	SSS_WRITE(dev, FCHRDMAS, sg_dma_address(sg));
	SSS_WRITE(dev, FCHRDMAL, sg_dma_len(sg)); /* DMA starts */
}

/**
 * s5p_hash_rx() - get next hash_sg_iter
 * @dev: device
 *
 * Return:
 * 2 if there is no more data and it is UPDATE op
 * 1 if new receiving (input) data is ready and can be written to device
 * 0 if there is no more data and it is FINAL op
 */
static int s5p_hash_rx(struct s5p_aes_dev *dev)
{
	if (dev->hash_sg_cnt > 0) {
		dev->hash_sg_iter = sg_next(dev->hash_sg_iter);
		return 1;
	}

	set_bit(HASH_FLAGS_DMA_READY, &dev->hash_flags);
	if (test_bit(HASH_FLAGS_FINAL, &dev->hash_flags))
		return 0;

	return 2;
}
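
/*
 * For illustration: the return codes of s5p_hash_rx() drive the interrupt
 * handler below: 1 keeps the DMA chain going with the next sg entry, 2
 * makes it pause the HASH engine and wait for the HPART irq (UPDATE op),
 * and 0 lets the FINAL op run to the HDONE irq.
 */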

static irqreturn_t s5p_aes_interrupt(int irq, void *dev_id)
{
	struct platform_device *pdev = dev_id;
	struct s5p_aes_dev *dev = platform_get_drvdata(pdev);
	struct skcipher_request *req;
	int err_dma_tx = 0;
	int err_dma_rx = 0;
	int err_dma_hx = 0;
	bool tx_end = false;
	bool hx_end = false;
	unsigned long flags;
	u32 status, st_bits;
	int err;

	spin_lock_irqsave(&dev->lock, flags);

	/*
	 * Handle rx or tx interrupt. If there is still data (scatterlist did not
	 * reach end), then map next scatterlist entry.
	 * In case of such mapping error, s5p_aes_complete() should be called.
	 *
	 * If there is no more data in tx scatter list, call s5p_aes_complete()
	 * and schedule new tasklet.
	 *
	 * Handle hx interrupt. If there is still data map next entry.
	 */
	status = SSS_READ(dev, FCINTSTAT);
	if (status & SSS_FCINTSTAT_BRDMAINT)
		err_dma_rx = s5p_aes_rx(dev);

	if (status & SSS_FCINTSTAT_BTDMAINT) {
		if (sg_is_last(dev->sg_dst))
			tx_end = true;
		err_dma_tx = s5p_aes_tx(dev);
	}

	if (status & SSS_FCINTSTAT_HRDMAINT)
		err_dma_hx = s5p_hash_rx(dev);

	st_bits = status & (SSS_FCINTSTAT_BRDMAINT | SSS_FCINTSTAT_BTDMAINT |
			    SSS_FCINTSTAT_HRDMAINT);
	/* clear DMA bits */
	SSS_WRITE(dev, FCINTPEND, st_bits);

	/* clear HASH irq bits */
	if (status & (SSS_FCINTSTAT_HDONEINT | SSS_FCINTSTAT_HPARTINT)) {
		/* cannot have both HPART and HDONE */
		if (status & SSS_FCINTSTAT_HPARTINT)
			st_bits = SSS_HASH_STATUS_PARTIAL_DONE;

		if (status & SSS_FCINTSTAT_HDONEINT)
			st_bits = SSS_HASH_STATUS_MSG_DONE;

		set_bit(HASH_FLAGS_OUTPUT_READY, &dev->hash_flags);
		s5p_hash_write(dev, SSS_REG_HASH_STATUS, st_bits);
		hx_end = true;
		/* when DONE or PART, do not handle HASH DMA */
		err_dma_hx = 0;
	}

	if (err_dma_rx < 0) {
		err = err_dma_rx;
		goto error;
	}
	if (err_dma_tx < 0) {
		err = err_dma_tx;
		goto error;
	}

	if (tx_end) {
		s5p_sg_done(dev);
		if (err_dma_hx == 1)
			s5p_set_dma_hashdata(dev, dev->hash_sg_iter);

		spin_unlock_irqrestore(&dev->lock, flags);

		s5p_aes_complete(dev->req, 0);
		/* Device is still busy */
		tasklet_schedule(&dev->tasklet);
	} else {
		/*
		 * Writing length of DMA block (either receiving or
		 * transmitting) will start the operation immediately, so this
		 * should be done at the end (even after clearing pending
		 * interrupts to not miss the interrupt).
		 */
		if (err_dma_tx == 1)
			s5p_set_dma_outdata(dev, dev->sg_dst);
		if (err_dma_rx == 1)
			s5p_set_dma_indata(dev, dev->sg_src);
		if (err_dma_hx == 1)
			s5p_set_dma_hashdata(dev, dev->hash_sg_iter);

		spin_unlock_irqrestore(&dev->lock, flags);
	}

	goto hash_irq_end;

error:
	s5p_sg_done(dev);
	dev->busy = false;
	req = dev->req;
	if (err_dma_hx == 1)
		s5p_set_dma_hashdata(dev, dev->hash_sg_iter);

	spin_unlock_irqrestore(&dev->lock, flags);
	s5p_aes_complete(req, err);

hash_irq_end:
	/*
	 * Note about the else if below:
	 * when hash_sg_iter reaches its end and it is an UPDATE op,
	 * issue SSS_HASH_PAUSE and wait for the HPART irq
	 */
	if (hx_end)
		tasklet_schedule(&dev->hash_tasklet);
	else if (err_dma_hx == 2)
		s5p_hash_write(dev, SSS_REG_HASH_CTRL_PAUSE,
			       SSS_HASH_PAUSE);

	return IRQ_HANDLED;
}
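
/*
 * For illustration, the interrupt flow above in short form (a sketch):
 *
 *	BRDMAINT -> s5p_aes_rx()  -> map next src entry or stop
 *	BTDMAINT -> s5p_aes_tx()  -> map next dst entry, or complete req
 *	HRDMAINT -> s5p_hash_rx() -> next hash entry, pause, or final
 *
 * Writing the DMA length registers last is what actually (re)starts the
 * transfers, so it is done only after the pending bits are cleared.
 */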

/**
 * s5p_hash_read_msg() - read message or IV from HW
 * @req: AHASH request
 */
static void s5p_hash_read_msg(struct ahash_request *req)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
	struct s5p_aes_dev *dd = ctx->dd;
	u32 *hash = (u32 *)ctx->digest;
	unsigned int i;

	for (i = 0; i < ctx->nregs; i++)
		hash[i] = s5p_hash_read(dd, SSS_REG_HASH_OUT(i));
}

/**
 * s5p_hash_write_ctx_iv() - write IV for next partial/finup op.
 * @dd: device
 * @ctx: request context
 */
static void s5p_hash_write_ctx_iv(struct s5p_aes_dev *dd,
				  const struct s5p_hash_reqctx *ctx)
{
	const u32 *hash = (const u32 *)ctx->digest;
	unsigned int i;

	for (i = 0; i < ctx->nregs; i++)
		s5p_hash_write(dd, SSS_REG_HASH_IV(i), hash[i]);
}

/**
 * s5p_hash_write_iv() - write IV for next partial/finup op.
 * @req: AHASH request
 */
static void s5p_hash_write_iv(struct ahash_request *req)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);

	s5p_hash_write_ctx_iv(ctx->dd, ctx);
}

/**
 * s5p_hash_copy_result() - copy digest into req->result
 * @req: AHASH request
 */
static void s5p_hash_copy_result(struct ahash_request *req)
{
	const struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);

	if (!req->result)
		return;

	memcpy(req->result, ctx->digest, ctx->nregs * HASH_REG_SIZEOF);
}

/**
 * s5p_hash_dma_flush() - flush HASH DMA
 * @dev: secss device
 */
static void s5p_hash_dma_flush(struct s5p_aes_dev *dev)
{
	SSS_WRITE(dev, FCHRDMAC, SSS_FCHRDMAC_FLUSH);
}

/**
 * s5p_hash_dma_enable() - enable DMA mode for HASH
 * @dev: secss device
 *
 * enable DMA mode for HASH
 */
static void s5p_hash_dma_enable(struct s5p_aes_dev *dev)
{
	s5p_hash_write(dev, SSS_REG_HASH_CTRL_FIFO, SSS_HASH_FIFO_MODE_DMA);
}

/**
 * s5p_hash_irq_disable() - disable irq HASH signals
 * @dev: secss device
 * @flags: bitfield with irq's to be disabled
 */
static void s5p_hash_irq_disable(struct s5p_aes_dev *dev, u32 flags)
{
	SSS_WRITE(dev, FCINTENCLR, flags);
}

/**
 * s5p_hash_irq_enable() - enable irq signals
 * @dev: secss device
 * @flags: bitfield with irq's to be enabled
 */
static void s5p_hash_irq_enable(struct s5p_aes_dev *dev, int flags)
{
	SSS_WRITE(dev, FCINTENSET, flags);
}

/**
 * s5p_hash_set_flow() - set flow inside SecSS AES/DES with/without HASH
 * @dev: secss device
 * @hashflow: HASH stream flow with/without crypto AES/DES
 */
static void s5p_hash_set_flow(struct s5p_aes_dev *dev, u32 hashflow)
{
	unsigned long flags;
	u32 flow;

	spin_lock_irqsave(&dev->lock, flags);

	flow = SSS_READ(dev, FCFIFOCTRL);
	flow &= ~SSS_HASHIN_MASK;
	flow |= hashflow;
	SSS_WRITE(dev, FCFIFOCTRL, flow);

	spin_unlock_irqrestore(&dev->lock, flags);
}

/**
 * s5p_ahash_dma_init() - enable DMA and set HASH flow inside SecSS
 * @dev: secss device
 * @hashflow: HASH stream flow with/without AES/DES
 *
 * flush HASH DMA and enable DMA, set HASH stream flow inside SecSS HW,
 * enable HASH irq's HRDMA, HDONE, HPART
 */
static void s5p_ahash_dma_init(struct s5p_aes_dev *dev, u32 hashflow)
{
	s5p_hash_irq_disable(dev, SSS_FCINTENCLR_HRDMAINTENCLR |
			     SSS_FCINTENCLR_HDONEINTENCLR |
			     SSS_FCINTENCLR_HPARTINTENCLR);
	s5p_hash_dma_flush(dev);

	s5p_hash_dma_enable(dev);
	s5p_hash_set_flow(dev, hashflow & SSS_HASHIN_MASK);
	s5p_hash_irq_enable(dev, SSS_FCINTENSET_HRDMAINTENSET |
			    SSS_FCINTENSET_HDONEINTENSET |
			    SSS_FCINTENSET_HPARTINTENSET);
}

/**
 * s5p_hash_write_ctrl() - prepare HASH block in SecSS for processing
 * @dd: secss device
 * @length: length for request
 * @final: true if final op
 *
 * Prepare the SSS HASH block for processing bytes in DMA mode. If it is
 * called after previous updates, fill up the IV words. For a final op,
 * calculate and set the message lengths so SecSS can finalize the hash.
 * For a partial op, program the length as 2^63 so it is never reached,
 * and set prelow and prehigh to zero.
 *
 * This function does not start DMA transfer.
 */
static void s5p_hash_write_ctrl(struct s5p_aes_dev *dd, size_t length,
				bool final)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);
	u32 prelow, prehigh, low, high;
	u32 configflags, swapflags;
	u64 tmplen;

	configflags = ctx->engine | SSS_HASH_INIT_BIT;

	if (likely(ctx->digcnt)) {
		s5p_hash_write_ctx_iv(dd, ctx);
		configflags |= SSS_HASH_USER_IV_EN;
	}

	if (final) {
		/* number of bytes for last part */
		low = length;
		high = 0;
		/* total number of bits prev hashed */
		tmplen = ctx->digcnt * 8;
		prelow = (u32)tmplen;
		prehigh = (u32)(tmplen >> 32);
	} else {
		prelow = 0;
		prehigh = 0;
		low = 0;
		high = BIT(31);
	}

	swapflags = SSS_HASH_BYTESWAP_DI | SSS_HASH_BYTESWAP_DO |
		    SSS_HASH_BYTESWAP_IV | SSS_HASH_BYTESWAP_KEY;

	s5p_hash_write(dd, SSS_REG_HASH_MSG_SIZE_LOW, low);
	s5p_hash_write(dd, SSS_REG_HASH_MSG_SIZE_HIGH, high);
	s5p_hash_write(dd, SSS_REG_HASH_PRE_MSG_SIZE_LOW, prelow);
	s5p_hash_write(dd, SSS_REG_HASH_PRE_MSG_SIZE_HIGH, prehigh);

	s5p_hash_write(dd, SSS_REG_HASH_CTRL_SWAP, swapflags);
	s5p_hash_write(dd, SSS_REG_HASH_CTRL, configflags);
}
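
/*
 * For illustration, a worked example with hypothetical values: finalizing
 * a 5-byte tail after two 64-byte updates gives length = 5, so low = 5
 * and high = 0, while digcnt = 128 bytes hashed before gives
 * tmplen = 1024 bits, so prelow = 1024 and prehigh = 0.
 */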

/**
 * s5p_hash_xmit_dma() - start DMA hash processing
 * @dd: secss device
 * @length: length for request
 * @final: true if final op
 *
 * Update digcnt here, as it is needed for finup/final op.
 */
static int s5p_hash_xmit_dma(struct s5p_aes_dev *dd, size_t length,
			     bool final)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);
	unsigned int cnt;

	cnt = dma_map_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE);
	if (!cnt) {
		dev_err(dd->dev, "dma_map_sg error\n");
		ctx->error = true;
		return -EINVAL;
	}

	set_bit(HASH_FLAGS_DMA_ACTIVE, &dd->hash_flags);
	dd->hash_sg_iter = ctx->sg;
	dd->hash_sg_cnt = cnt;
	s5p_hash_write_ctrl(dd, length, final);
	ctx->digcnt += length;
	ctx->total -= length;

	/* catch last interrupt */
	if (final)
		set_bit(HASH_FLAGS_FINAL, &dd->hash_flags);

	s5p_set_dma_hashdata(dd, dd->hash_sg_iter); /* DMA starts */

	return -EINPROGRESS;
}

/**
 * s5p_hash_copy_sgs() - copy request's bytes into new buffer
 * @ctx: request context
 * @sg: source scatterlist request
 * @new_len: number of bytes to process from sg
 *
 * Allocate new buffer, copy data for HASH into it. If there was xmit_buf
 * filled, copy it first, then copy data from sg into it. Prepare one sgl[0]
 * with allocated buffer.
 *
 * Set bit in dd->hash_flag so we can free it after irq ends processing.
 */
static int s5p_hash_copy_sgs(struct s5p_hash_reqctx *ctx,
			     struct scatterlist *sg, unsigned int new_len)
{
	unsigned int pages, len;
	void *buf;

	len = new_len + ctx->bufcnt;
	pages = get_order(len);

	buf = (void *)__get_free_pages(GFP_ATOMIC, pages);
	if (!buf) {
		dev_err(ctx->dd->dev, "alloc pages for unaligned case.\n");
		ctx->error = true;
		return -ENOMEM;
	}

	if (ctx->bufcnt)
		memcpy(buf, ctx->dd->xmit_buf, ctx->bufcnt);

	scatterwalk_map_and_copy(buf + ctx->bufcnt, sg, ctx->skip,
				 new_len, 0);
	sg_init_table(ctx->sgl, 1);
	sg_set_buf(ctx->sgl, buf, len);
	ctx->sg = ctx->sgl;
	ctx->sg_len = 1;
	ctx->bufcnt = 0;
	ctx->skip = 0;
	set_bit(HASH_FLAGS_SGS_COPIED, &ctx->dd->hash_flags);

	return 0;
}

/**
 * s5p_hash_copy_sg_lists() - copy sg list and make fixes in copy
 * @ctx: request context
 * @sg: source scatterlist request
 * @new_len: number of bytes to process from sg
 *
 * Allocate new scatterlist table, copy data for HASH into it. If there was
 * xmit_buf filled, prepare it first, then copy page, length and offset from
 * source sg into it, adjusting begin and/or end for skip offset and
 * hash_later value.
 *
 * Resulting sg table will be assigned to ctx->sg. Set flag so we can free
 * it after irq ends processing.
 */
static int s5p_hash_copy_sg_lists(struct s5p_hash_reqctx *ctx,
				  struct scatterlist *sg, unsigned int new_len)
{
	unsigned int skip = ctx->skip, n = sg_nents(sg);
	struct scatterlist *tmp;
	unsigned int len;

	if (ctx->bufcnt)
		n++;

	ctx->sg = kmalloc_array(n, sizeof(*sg), GFP_KERNEL);
	if (!ctx->sg) {
		ctx->error = true;
		return -ENOMEM;
	}

	sg_init_table(ctx->sg, n);

	tmp = ctx->sg;

	ctx->sg_len = 0;

	if (ctx->bufcnt) {
		sg_set_buf(tmp, ctx->dd->xmit_buf, ctx->bufcnt);
		tmp = sg_next(tmp);
		ctx->sg_len++;
	}

	while (sg && skip >= sg->length) {
		skip -= sg->length;
		sg = sg_next(sg);
	}

	while (sg && new_len) {
		len = sg->length - skip;
		if (new_len < len)
			len = new_len;

		new_len -= len;
		sg_set_page(tmp, sg_page(sg), len, sg->offset + skip);
		skip = 0;
		if (!new_len)
			sg_mark_end(tmp);

		tmp = sg_next(tmp);
		ctx->sg_len++;
		sg = sg_next(sg);
	}

	set_bit(HASH_FLAGS_SGS_ALLOCED, &ctx->dd->hash_flags);

	return 0;
}

/**
 * s5p_hash_prepare_sgs() - prepare sg for processing
 * @ctx: request context
 * @sg: source scatterlist request
 * @new_len: number of bytes to process from sg
 * @final: final flag
 *
 * Check two conditions: (1) the buffers in sg carry BUFLEN-aligned data,
 * and (2) the sg table has well-aligned elements (list_ok). If either
 * check fails, then respectively: (1) allocate a new buffer with
 * s5p_hash_copy_sgs(), copy the data into it and prepare the request in
 * sgl, or (2) allocate a new sg table and prepare its sg elements.
1137*4882a593Smuzhiyun *
1138*4882a593Smuzhiyun * For digest or finup all conditions can be good, and we may not need any
1139*4882a593Smuzhiyun * fixes.
1140*4882a593Smuzhiyun */
static int s5p_hash_prepare_sgs(struct s5p_hash_reqctx *ctx,
				struct scatterlist *sg,
				unsigned int new_len, bool final)
{
	unsigned int skip = ctx->skip, nbytes = new_len, n = 0;
	bool aligned = true, list_ok = true;
	struct scatterlist *sg_tmp = sg;

	if (!sg || !sg->length || !new_len)
		return 0;

	if (skip || !final)
		list_ok = false;

	while (nbytes > 0 && sg_tmp) {
		n++;
		if (skip >= sg_tmp->length) {
			skip -= sg_tmp->length;
			if (!sg_tmp->length) {
				aligned = false;
				break;
			}
		} else {
			if (!IS_ALIGNED(sg_tmp->length - skip, BUFLEN)) {
				aligned = false;
				break;
			}

			if (nbytes < sg_tmp->length - skip) {
				list_ok = false;
				break;
			}

			nbytes -= sg_tmp->length - skip;
			skip = 0;
		}

		sg_tmp = sg_next(sg_tmp);
	}

	if (!aligned)
		return s5p_hash_copy_sgs(ctx, sg, new_len);
	else if (!list_ok)
		return s5p_hash_copy_sg_lists(ctx, sg, new_len);

	/*
	 * Have aligned data from previous operation and/or current
	 * Note: will enter here only if (digest or finup) and aligned
	 */
	if (ctx->bufcnt) {
		ctx->sg_len = n;
		sg_init_table(ctx->sgl, 2);
		sg_set_buf(ctx->sgl, ctx->dd->xmit_buf, ctx->bufcnt);
		sg_chain(ctx->sgl, 2, sg);
		ctx->sg = ctx->sgl;
		ctx->sg_len++;
	} else {
		ctx->sg = sg;
		ctx->sg_len = n;
	}

	return 0;
}
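
/*
 * Illustrative sketch (not part of the driver): prepending one linear
 * buffer to an existing scatterlist via a two-entry chain table, as the
 * aligned path above does for the buffered block. Names are hypothetical.
 *
 *	struct scatterlist head[2];
 *
 *	sg_init_table(head, 2);
 *	sg_set_buf(&head[0], bounce_buf, bounce_len);
 *	sg_chain(head, 2, user_sg);	// head[1] turns into a chain link
 *	// walk/map "head" with nents = number of user entries + 1
 */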

/**
 * s5p_hash_prepare_request() - prepare request for processing
 * @req: AHASH request
 * @update: true if UPDATE op
 *
 * Note 1: we can have the update flag _and_ the final flag at the same time.
 * Note 2: we enter here when digcnt > BUFLEN (= HASH_BLOCK_SIZE), or when
 * req->nbytes or ctx->bufcnt + req->nbytes is > BUFLEN, or on a final op.
 */
static int s5p_hash_prepare_request(struct ahash_request *req, bool update)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
	bool final = ctx->finup;
	int xmit_len, hash_later, nbytes;
	int ret;

	if (update)
		nbytes = req->nbytes;
	else
		nbytes = 0;

	ctx->total = nbytes + ctx->bufcnt;
	if (!ctx->total)
		return 0;

	if (nbytes && (!IS_ALIGNED(ctx->bufcnt, BUFLEN))) {
		/* bytes left from previous request, so fill up to BUFLEN */
		int len = BUFLEN - ctx->bufcnt % BUFLEN;

		if (len > nbytes)
			len = nbytes;

		scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src,
					 0, len, 0);
		ctx->bufcnt += len;
		nbytes -= len;
		ctx->skip = len;
	} else {
		ctx->skip = 0;
	}

	if (ctx->bufcnt)
		memcpy(ctx->dd->xmit_buf, ctx->buffer, ctx->bufcnt);

	xmit_len = ctx->total;
	if (final) {
		hash_later = 0;
	} else {
		if (IS_ALIGNED(xmit_len, BUFLEN))
			xmit_len -= BUFLEN;
		else
			xmit_len -= xmit_len & (BUFLEN - 1);

		hash_later = ctx->total - xmit_len;
		/* copy hash_later bytes from end of req->src */
		/* previous bytes are in xmit_buf, so no overwrite */
		scatterwalk_map_and_copy(ctx->buffer, req->src,
					 req->nbytes - hash_later,
					 hash_later, 0);
	}

	if (xmit_len > BUFLEN) {
		ret = s5p_hash_prepare_sgs(ctx, req->src, nbytes - hash_later,
					   final);
		if (ret)
			return ret;
	} else {
		/* have buffered data only */
		if (unlikely(!ctx->bufcnt)) {
			/* first update didn't fill up buffer */
			scatterwalk_map_and_copy(ctx->dd->xmit_buf, req->src,
						 0, xmit_len, 0);
		}

		sg_init_table(ctx->sgl, 1);
		sg_set_buf(ctx->sgl, ctx->dd->xmit_buf, xmit_len);

		ctx->sg = ctx->sgl;
		ctx->sg_len = 1;
	}

	ctx->bufcnt = hash_later;
	if (!final)
		ctx->total = xmit_len;

	return 0;
}
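
/*
 * Illustrative sketch (not part of the driver): the block-rounding rule
 * applied above. For a non-final op the hardware must only ever consume
 * whole BUFLEN blocks, and at least one full block is held back so that
 * final() always has buffered bytes to pad:
 *
 *	xmit = total;
 *	if (IS_ALIGNED(xmit, BUFLEN))
 *		xmit -= BUFLEN;			// keep one whole block back
 *	else
 *		xmit -= xmit & (BUFLEN - 1);	// round down to block multiple
 *	later = total - xmit;	// 1..BUFLEN bytes stay in ctx->buffer
 */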

/**
 * s5p_hash_update_dma_stop() - unmap DMA
 * @dd: secss device
 *
 * Unmap scatterlist ctx->sg.
 */
static void s5p_hash_update_dma_stop(struct s5p_aes_dev *dd)
{
	const struct s5p_hash_reqctx *ctx = ahash_request_ctx(dd->hash_req);

	dma_unmap_sg(dd->dev, ctx->sg, ctx->sg_len, DMA_TO_DEVICE);
	clear_bit(HASH_FLAGS_DMA_ACTIVE, &dd->hash_flags);
}

/**
 * s5p_hash_finish() - copy calculated digest to crypto layer
 * @req: AHASH request
 */
static void s5p_hash_finish(struct ahash_request *req)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
	struct s5p_aes_dev *dd = ctx->dd;

	if (ctx->digcnt)
		s5p_hash_copy_result(req);

	dev_dbg(dd->dev, "hash_finish digcnt: %lld\n", ctx->digcnt);
}

/**
 * s5p_hash_finish_req() - finish request
 * @req: AHASH request
 * @err: error
 */
static void s5p_hash_finish_req(struct ahash_request *req, int err)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
	struct s5p_aes_dev *dd = ctx->dd;
	unsigned long flags;

	if (test_bit(HASH_FLAGS_SGS_COPIED, &dd->hash_flags))
		free_pages((unsigned long)sg_virt(ctx->sg),
			   get_order(ctx->sg->length));

	if (test_bit(HASH_FLAGS_SGS_ALLOCED, &dd->hash_flags))
		kfree(ctx->sg);

	ctx->sg = NULL;
	dd->hash_flags &= ~(BIT(HASH_FLAGS_SGS_ALLOCED) |
			    BIT(HASH_FLAGS_SGS_COPIED));

	if (!err && !ctx->error) {
		s5p_hash_read_msg(req);
		if (test_bit(HASH_FLAGS_FINAL, &dd->hash_flags))
			s5p_hash_finish(req);
	} else {
		ctx->error = true;
	}

	spin_lock_irqsave(&dd->hash_lock, flags);
	dd->hash_flags &= ~(BIT(HASH_FLAGS_BUSY) | BIT(HASH_FLAGS_FINAL) |
			    BIT(HASH_FLAGS_DMA_READY) |
			    BIT(HASH_FLAGS_OUTPUT_READY));
	spin_unlock_irqrestore(&dd->hash_lock, flags);

	if (req->base.complete)
		req->base.complete(&req->base, err);
}

/**
 * s5p_hash_handle_queue() - handle hash queue
 * @dd: device s5p_aes_dev
 * @req: AHASH request
 *
 * If req != NULL, enqueue it on dd->queue; if FLAGS_BUSY is not set on the
 * device, then process the first request from dd->queue.
 *
 * Returns: see s5p_hash_final below.
 */
static int s5p_hash_handle_queue(struct s5p_aes_dev *dd,
				 struct ahash_request *req)
{
	struct crypto_async_request *async_req, *backlog;
	struct s5p_hash_reqctx *ctx;
	unsigned long flags;
	int err = 0, ret = 0;

retry:
	spin_lock_irqsave(&dd->hash_lock, flags);
	if (req)
		ret = ahash_enqueue_request(&dd->hash_queue, req);

	if (test_bit(HASH_FLAGS_BUSY, &dd->hash_flags)) {
		spin_unlock_irqrestore(&dd->hash_lock, flags);
		return ret;
	}

	backlog = crypto_get_backlog(&dd->hash_queue);
	async_req = crypto_dequeue_request(&dd->hash_queue);
	if (async_req)
		set_bit(HASH_FLAGS_BUSY, &dd->hash_flags);

	spin_unlock_irqrestore(&dd->hash_lock, flags);

	if (!async_req)
		return ret;

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	req = ahash_request_cast(async_req);
	dd->hash_req = req;
	ctx = ahash_request_ctx(req);

	err = s5p_hash_prepare_request(req, ctx->op_update);
	if (err || !ctx->total)
		goto out;

	dev_dbg(dd->dev, "handling new req, op_update: %u, nbytes: %d\n",
		ctx->op_update, req->nbytes);

	s5p_ahash_dma_init(dd, SSS_HASHIN_INDEPENDENT);
	if (ctx->digcnt)
		s5p_hash_write_iv(req); /* restore hash IV */

	if (ctx->op_update) { /* HASH_OP_UPDATE */
		err = s5p_hash_xmit_dma(dd, ctx->total, ctx->finup);
		if (err != -EINPROGRESS && ctx->finup && !ctx->error)
			/* no final() after finup() */
			err = s5p_hash_xmit_dma(dd, ctx->total, true);
	} else { /* HASH_OP_FINAL */
		err = s5p_hash_xmit_dma(dd, ctx->total, true);
	}
out:
	if (err != -EINPROGRESS) {
		/* hash_tasklet_cb will not finish it, so do it here */
		s5p_hash_finish_req(req, err);
		req = NULL;

		/*
		 * Execute next request immediately if there is anything
		 * in queue.
		 */
		goto retry;
	}

	return ret;
}
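
/*
 * Illustrative sketch (not part of the driver): the standard crypto-queue
 * dispatch pattern followed above, with locking trimmed and names made
 * hypothetical.
 *
 *	spin_lock_irqsave(&lock, flags);
 *	backlog = crypto_get_backlog(&queue);
 *	async_req = crypto_dequeue_request(&queue);
 *	spin_unlock_irqrestore(&lock, flags);
 *
 *	if (backlog)		// a previously backlogged caller may go on
 *		backlog->complete(backlog, -EINPROGRESS);
 *	if (async_req)
 *		process(async_req);	// hypothetical worker
 */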

/**
 * s5p_hash_tasklet_cb() - hash tasklet
 * @data: ptr to s5p_aes_dev
 */
static void s5p_hash_tasklet_cb(unsigned long data)
{
	struct s5p_aes_dev *dd = (struct s5p_aes_dev *)data;

	if (!test_bit(HASH_FLAGS_BUSY, &dd->hash_flags)) {
		s5p_hash_handle_queue(dd, NULL);
		return;
	}

	if (test_bit(HASH_FLAGS_DMA_READY, &dd->hash_flags)) {
		if (test_and_clear_bit(HASH_FLAGS_DMA_ACTIVE,
				       &dd->hash_flags)) {
			s5p_hash_update_dma_stop(dd);
		}

		if (test_and_clear_bit(HASH_FLAGS_OUTPUT_READY,
				       &dd->hash_flags)) {
			/* hash or semi-hash ready */
			clear_bit(HASH_FLAGS_DMA_READY, &dd->hash_flags);
			goto finish;
		}
	}

	return;

finish:
	/* finish current request */
	s5p_hash_finish_req(dd->hash_req, 0);

	/* If we are not busy, process next req */
	if (!test_bit(HASH_FLAGS_BUSY, &dd->hash_flags))
		s5p_hash_handle_queue(dd, NULL);
}

/**
 * s5p_hash_enqueue() - enqueue request
 * @req: AHASH request
 * @op: operation UPDATE (true) or FINAL (false)
 *
 * Returns: see s5p_hash_final below.
 */
static int s5p_hash_enqueue(struct ahash_request *req, bool op)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
	struct s5p_hash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);

	ctx->op_update = op;

	return s5p_hash_handle_queue(tctx->dd, req);
}

/**
 * s5p_hash_update() - process the hash input data
 * @req: AHASH request
 *
 * If the request fits into the buffer, copy it and return immediately;
 * otherwise enqueue it with OP_UPDATE.
 *
 * Returns: see s5p_hash_final below.
 */
static int s5p_hash_update(struct ahash_request *req)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);

	if (!req->nbytes)
		return 0;

	if (ctx->bufcnt + req->nbytes <= BUFLEN) {
		scatterwalk_map_and_copy(ctx->buffer + ctx->bufcnt, req->src,
					 0, req->nbytes, 0);
		ctx->bufcnt += req->nbytes;
		return 0;
	}

	return s5p_hash_enqueue(req, true); /* HASH_OP_UPDATE */
}

/**
 * s5p_hash_final() - close up hash and calculate digest
 * @req: AHASH request
 *
 * Note: in final, req->src does not contain any data and req->nbytes can be
 * non-zero.
 *
 * If no input data has been processed yet and the buffered hash data is
 * less than BUFLEN (64), then calculate the final hash immediately by using
 * the SW algorithm fallback.
 *
 * Otherwise enqueue the current AHASH request with the OP_FINAL operation
 * and finalize the hash message in HW. Note that if digcnt != 0 then there
 * was a previous update op, so there are always some buffered bytes in
 * ctx->buffer, which means that ctx->bufcnt != 0.
 *
 * Returns:
 * 0 if the request has been processed immediately,
 * -EINPROGRESS if the operation has been queued for later execution or is
 * set to processing by HW,
 * -EBUSY if queue is full and request should be resubmitted later,
 * any other negative value denotes an error.
 */
static int s5p_hash_final(struct ahash_request *req)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);

	ctx->finup = true;
	if (ctx->error)
		return -EINVAL; /* uncompleted hash is not needed */

	if (!ctx->digcnt && ctx->bufcnt < BUFLEN) {
		struct s5p_hash_ctx *tctx = crypto_tfm_ctx(req->base.tfm);

		return crypto_shash_tfm_digest(tctx->fallback, ctx->buffer,
					       ctx->bufcnt, req->result);
	}

	return s5p_hash_enqueue(req, false); /* HASH_OP_FINAL */
}
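
/*
 * Illustrative sketch (not part of the driver): how a kernel user would
 * reach the update()/final() paths above through the generic ahash API.
 * Error handling is trimmed and all names are hypothetical.
 *
 *	struct crypto_ahash *tfm = crypto_alloc_ahash("sha256", 0, 0);
 *	struct ahash_request *req = ahash_request_alloc(tfm, GFP_KERNEL);
 *
 *	ahash_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				   done_cb, &done_data);
 *	ahash_request_set_crypt(req, src_sg, digest, src_len);
 *	crypto_ahash_digest(req);	// may complete asynchronously with
 *					// -EINPROGRESS, see returns above
 */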

/**
 * s5p_hash_finup() - process last req->src and calculate digest
 * @req: AHASH request containing the last update data
 *
 * Return values: see s5p_hash_final above.
 */
static int s5p_hash_finup(struct ahash_request *req)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
	int err1, err2;

	ctx->finup = true;

	err1 = s5p_hash_update(req);
	if (err1 == -EINPROGRESS || err1 == -EBUSY)
		return err1;

	/*
	 * final() must always be called to clean up resources even if
	 * update() failed, except when it returned -EINPROGRESS or when the
	 * digest was already calculated via the fallback for a small input.
	 */
	err2 = s5p_hash_final(req);

	return err1 ?: err2;
}

/**
 * s5p_hash_init() - initialize AHASH request context
 * @req: AHASH request
 *
 * Init the async hash request context.
 */
static int s5p_hash_init(struct ahash_request *req)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct s5p_hash_ctx *tctx = crypto_ahash_ctx(tfm);

	ctx->dd = tctx->dd;
	ctx->error = false;
	ctx->finup = false;
	ctx->bufcnt = 0;
	ctx->digcnt = 0;
	ctx->total = 0;
	ctx->skip = 0;

	dev_dbg(tctx->dd->dev, "init: digest size: %d\n",
		crypto_ahash_digestsize(tfm));

	switch (crypto_ahash_digestsize(tfm)) {
	case MD5_DIGEST_SIZE:
		ctx->engine = SSS_HASH_ENGINE_MD5;
		ctx->nregs = HASH_MD5_MAX_REG;
		break;
	case SHA1_DIGEST_SIZE:
		ctx->engine = SSS_HASH_ENGINE_SHA1;
		ctx->nregs = HASH_SHA1_MAX_REG;
		break;
	case SHA256_DIGEST_SIZE:
		ctx->engine = SSS_HASH_ENGINE_SHA256;
		ctx->nregs = HASH_SHA256_MAX_REG;
		break;
	default:
		ctx->error = true;
		return -EINVAL;
	}

	return 0;
}

/**
 * s5p_hash_digest() - calculate digest from req->src
 * @req: AHASH request
 *
 * Return values: see s5p_hash_final above.
 */
static int s5p_hash_digest(struct ahash_request *req)
{
	return s5p_hash_init(req) ?: s5p_hash_finup(req);
}

/**
 * s5p_hash_cra_init_alg() - init crypto alg transformation
 * @tfm: crypto transformation
 */
static int s5p_hash_cra_init_alg(struct crypto_tfm *tfm)
{
	struct s5p_hash_ctx *tctx = crypto_tfm_ctx(tfm);
	const char *alg_name = crypto_tfm_alg_name(tfm);

	tctx->dd = s5p_dev;
	/* Allocate a fallback and abort if it failed. */
	tctx->fallback = crypto_alloc_shash(alg_name, 0,
					    CRYPTO_ALG_NEED_FALLBACK);
	if (IS_ERR(tctx->fallback)) {
		pr_err("fallback alloc fails for '%s'\n", alg_name);
		return PTR_ERR(tctx->fallback);
	}

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm),
				 sizeof(struct s5p_hash_reqctx) + BUFLEN);

	return 0;
}

/**
 * s5p_hash_cra_init() - init crypto tfm
 * @tfm: crypto transformation
 */
static int s5p_hash_cra_init(struct crypto_tfm *tfm)
{
	return s5p_hash_cra_init_alg(tfm);
}

/**
 * s5p_hash_cra_exit() - exit crypto tfm
 * @tfm: crypto transformation
 *
 * Free the allocated fallback.
 */
static void s5p_hash_cra_exit(struct crypto_tfm *tfm)
{
	struct s5p_hash_ctx *tctx = crypto_tfm_ctx(tfm);

	crypto_free_shash(tctx->fallback);
	tctx->fallback = NULL;
}

/**
 * s5p_hash_export() - export hash state
 * @req: AHASH request
 * @out: buffer for exported state
 */
static int s5p_hash_export(struct ahash_request *req, void *out)
{
	const struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);

	memcpy(out, ctx, sizeof(*ctx) + ctx->bufcnt);

	return 0;
}

/**
 * s5p_hash_import() - import hash state
 * @req: AHASH request
 * @in: buffer with state to be imported from
 */
static int s5p_hash_import(struct ahash_request *req, const void *in)
{
	struct s5p_hash_reqctx *ctx = ahash_request_ctx(req);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct s5p_hash_ctx *tctx = crypto_ahash_ctx(tfm);
	const struct s5p_hash_reqctx *ctx_in = in;

	memcpy(ctx, in, sizeof(*ctx) + BUFLEN);
	if (ctx_in->bufcnt > BUFLEN) {
		ctx->error = true;
		return -EINVAL;
	}

	ctx->dd = tctx->dd;
	ctx->error = false;

	return 0;
}
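
/*
 * Illustrative sketch (not part of the driver): the export/import
 * round-trip as the crypto layer drives it, e.g. to duplicate a partially
 * hashed state. Names are hypothetical.
 *
 *	u8 state[sizeof(struct s5p_hash_reqctx) + BUFLEN];
 *
 *	crypto_ahash_export(req_a, state);	// lands in s5p_hash_export()
 *	crypto_ahash_import(req_b, state);	// lands in s5p_hash_import()
 *	// req_b now continues hashing where req_a left off
 */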

static struct ahash_alg algs_sha1_md5_sha256[] = {
{
	.init = s5p_hash_init,
	.update = s5p_hash_update,
	.final = s5p_hash_final,
	.finup = s5p_hash_finup,
	.digest = s5p_hash_digest,
	.export = s5p_hash_export,
	.import = s5p_hash_import,
	.halg.statesize = sizeof(struct s5p_hash_reqctx) + BUFLEN,
	.halg.digestsize = SHA1_DIGEST_SIZE,
	.halg.base = {
		.cra_name = "sha1",
		.cra_driver_name = "exynos-sha1",
		.cra_priority = 100,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
			     CRYPTO_ALG_ASYNC |
			     CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize = HASH_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct s5p_hash_ctx),
		.cra_alignmask = SSS_HASH_DMA_ALIGN_MASK,
		.cra_module = THIS_MODULE,
		.cra_init = s5p_hash_cra_init,
		.cra_exit = s5p_hash_cra_exit,
	}
},
{
	.init = s5p_hash_init,
	.update = s5p_hash_update,
	.final = s5p_hash_final,
	.finup = s5p_hash_finup,
	.digest = s5p_hash_digest,
	.export = s5p_hash_export,
	.import = s5p_hash_import,
	.halg.statesize = sizeof(struct s5p_hash_reqctx) + BUFLEN,
	.halg.digestsize = MD5_DIGEST_SIZE,
	.halg.base = {
		.cra_name = "md5",
		.cra_driver_name = "exynos-md5",
		.cra_priority = 100,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
			     CRYPTO_ALG_ASYNC |
			     CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize = HASH_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct s5p_hash_ctx),
		.cra_alignmask = SSS_HASH_DMA_ALIGN_MASK,
		.cra_module = THIS_MODULE,
		.cra_init = s5p_hash_cra_init,
		.cra_exit = s5p_hash_cra_exit,
	}
},
{
	.init = s5p_hash_init,
	.update = s5p_hash_update,
	.final = s5p_hash_final,
	.finup = s5p_hash_finup,
	.digest = s5p_hash_digest,
	.export = s5p_hash_export,
	.import = s5p_hash_import,
	.halg.statesize = sizeof(struct s5p_hash_reqctx) + BUFLEN,
	.halg.digestsize = SHA256_DIGEST_SIZE,
	.halg.base = {
		.cra_name = "sha256",
		.cra_driver_name = "exynos-sha256",
		.cra_priority = 100,
		.cra_flags = CRYPTO_ALG_KERN_DRIVER_ONLY |
			     CRYPTO_ALG_ASYNC |
			     CRYPTO_ALG_NEED_FALLBACK,
		.cra_blocksize = HASH_BLOCK_SIZE,
		.cra_ctxsize = sizeof(struct s5p_hash_ctx),
		.cra_alignmask = SSS_HASH_DMA_ALIGN_MASK,
		.cra_module = THIS_MODULE,
		.cra_init = s5p_hash_cra_init,
		.cra_exit = s5p_hash_cra_exit,
	}
}
};

static void s5p_set_aes(struct s5p_aes_dev *dev,
			const u8 *key, const u8 *iv, const u8 *ctr,
			unsigned int keylen)
{
	void __iomem *keystart;

	if (iv)
		memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_IV_DATA(0), iv,
			    AES_BLOCK_SIZE);

	if (ctr)
		memcpy_toio(dev->aes_ioaddr + SSS_REG_AES_CNT_DATA(0), ctr,
			    AES_BLOCK_SIZE);

	if (keylen == AES_KEYSIZE_256)
		keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(0);
	else if (keylen == AES_KEYSIZE_192)
		keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(2);
	else
		keystart = dev->aes_ioaddr + SSS_REG_AES_KEY_DATA(4);

	memcpy_toio(keystart, key, keylen);
}
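
/*
 * Illustrative note (not part of the driver): as the offsets above imply,
 * the key is written right-aligned into the 8-word key register file, so
 * shorter keys start at a higher word index:
 *
 *	AES-256: words 0..7 -> keystart = SSS_REG_AES_KEY_DATA(0)
 *	AES-192: words 2..7 -> keystart = SSS_REG_AES_KEY_DATA(2)
 *	AES-128: words 4..7 -> keystart = SSS_REG_AES_KEY_DATA(4)
 */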

static bool s5p_is_sg_aligned(struct scatterlist *sg)
{
	while (sg) {
		if (!IS_ALIGNED(sg->length, AES_BLOCK_SIZE))
			return false;
		sg = sg_next(sg);
	}

	return true;
}

static int s5p_set_indata_start(struct s5p_aes_dev *dev,
				struct skcipher_request *req)
{
	struct scatterlist *sg;
	int err;

	dev->sg_src_cpy = NULL;
	sg = req->src;
	if (!s5p_is_sg_aligned(sg)) {
		dev_dbg(dev->dev,
			"At least one unaligned source scatter list, making a copy\n");
		err = s5p_make_sg_cpy(dev, sg, &dev->sg_src_cpy);
		if (err)
			return err;

		sg = dev->sg_src_cpy;
	}

	err = s5p_set_indata(dev, sg);
	if (err) {
		s5p_free_sg_cpy(dev, &dev->sg_src_cpy);
		return err;
	}

	return 0;
}

static int s5p_set_outdata_start(struct s5p_aes_dev *dev,
				 struct skcipher_request *req)
{
	struct scatterlist *sg;
	int err;

	dev->sg_dst_cpy = NULL;
	sg = req->dst;
	if (!s5p_is_sg_aligned(sg)) {
		dev_dbg(dev->dev,
			"At least one unaligned dest scatter list, making a copy\n");
		err = s5p_make_sg_cpy(dev, sg, &dev->sg_dst_cpy);
		if (err)
			return err;

		sg = dev->sg_dst_cpy;
	}

	err = s5p_set_outdata(dev, sg);
	if (err) {
		s5p_free_sg_cpy(dev, &dev->sg_dst_cpy);
		return err;
	}

	return 0;
}

static void s5p_aes_crypt_start(struct s5p_aes_dev *dev, unsigned long mode)
{
	struct skcipher_request *req = dev->req;
	u32 aes_control;
	unsigned long flags;
	int err;
	u8 *iv, *ctr;

	/* This sets bit [13:12] to 00, which selects 128-bit counter */
	aes_control = SSS_AES_KEY_CHANGE_MODE;
	if (mode & FLAGS_AES_DECRYPT)
		aes_control |= SSS_AES_MODE_DECRYPT;

	if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CBC) {
		aes_control |= SSS_AES_CHAIN_MODE_CBC;
		iv = req->iv;
		ctr = NULL;
	} else if ((mode & FLAGS_AES_MODE_MASK) == FLAGS_AES_CTR) {
		aes_control |= SSS_AES_CHAIN_MODE_CTR;
		iv = NULL;
		ctr = req->iv;
	} else {
		iv = NULL; /* AES_ECB */
		ctr = NULL;
	}

	if (dev->ctx->keylen == AES_KEYSIZE_192)
		aes_control |= SSS_AES_KEY_SIZE_192;
	else if (dev->ctx->keylen == AES_KEYSIZE_256)
		aes_control |= SSS_AES_KEY_SIZE_256;

	aes_control |= SSS_AES_FIFO_MODE;

	/* as a variant it is possible to use byte swapping on DMA side */
	aes_control |= SSS_AES_BYTESWAP_DI
		    |  SSS_AES_BYTESWAP_DO
		    |  SSS_AES_BYTESWAP_IV
		    |  SSS_AES_BYTESWAP_KEY
		    |  SSS_AES_BYTESWAP_CNT;

	spin_lock_irqsave(&dev->lock, flags);

	SSS_WRITE(dev, FCINTENCLR,
		  SSS_FCINTENCLR_BTDMAINTENCLR | SSS_FCINTENCLR_BRDMAINTENCLR);
	SSS_WRITE(dev, FCFIFOCTRL, 0x00);

	err = s5p_set_indata_start(dev, req);
	if (err)
		goto indata_error;

	err = s5p_set_outdata_start(dev, req);
	if (err)
		goto outdata_error;

	SSS_AES_WRITE(dev, AES_CONTROL, aes_control);
	s5p_set_aes(dev, dev->ctx->aes_key, iv, ctr, dev->ctx->keylen);

	s5p_set_dma_indata(dev, dev->sg_src);
	s5p_set_dma_outdata(dev, dev->sg_dst);

	SSS_WRITE(dev, FCINTENSET,
		  SSS_FCINTENSET_BTDMAINTENSET | SSS_FCINTENSET_BRDMAINTENSET);

	spin_unlock_irqrestore(&dev->lock, flags);

	return;

outdata_error:
	s5p_unset_indata(dev);

indata_error:
	s5p_sg_done(dev);
	dev->busy = false;
	spin_unlock_irqrestore(&dev->lock, flags);
	s5p_aes_complete(req, err);
}

static void s5p_tasklet_cb(unsigned long data)
{
	struct s5p_aes_dev *dev = (struct s5p_aes_dev *)data;
	struct crypto_async_request *async_req, *backlog;
	struct s5p_aes_reqctx *reqctx;
	unsigned long flags;

	spin_lock_irqsave(&dev->lock, flags);
	backlog = crypto_get_backlog(&dev->queue);
	async_req = crypto_dequeue_request(&dev->queue);

	if (!async_req) {
		dev->busy = false;
		spin_unlock_irqrestore(&dev->lock, flags);
		return;
	}
	spin_unlock_irqrestore(&dev->lock, flags);

	if (backlog)
		backlog->complete(backlog, -EINPROGRESS);

	dev->req = skcipher_request_cast(async_req);
	dev->ctx = crypto_tfm_ctx(dev->req->base.tfm);
	reqctx = skcipher_request_ctx(dev->req);

	s5p_aes_crypt_start(dev, reqctx->mode);
}

static int s5p_aes_handle_req(struct s5p_aes_dev *dev,
			      struct skcipher_request *req)
{
	unsigned long flags;
	int err;

	spin_lock_irqsave(&dev->lock, flags);
	err = crypto_enqueue_request(&dev->queue, &req->base);
	if (dev->busy) {
		spin_unlock_irqrestore(&dev->lock, flags);
		return err;
	}
	dev->busy = true;

	spin_unlock_irqrestore(&dev->lock, flags);

	tasklet_schedule(&dev->tasklet);

	return err;
}

static int s5p_aes_crypt(struct skcipher_request *req, unsigned long mode)
{
	struct crypto_skcipher *tfm = crypto_skcipher_reqtfm(req);
	struct s5p_aes_reqctx *reqctx = skcipher_request_ctx(req);
	struct s5p_aes_ctx *ctx = crypto_skcipher_ctx(tfm);
	struct s5p_aes_dev *dev = ctx->dev;

	if (!req->cryptlen)
		return 0;

	if (!IS_ALIGNED(req->cryptlen, AES_BLOCK_SIZE) &&
	    ((mode & FLAGS_AES_MODE_MASK) != FLAGS_AES_CTR)) {
		dev_dbg(dev->dev, "request size is not exact amount of AES blocks\n");
		return -EINVAL;
	}

	reqctx->mode = mode;

	return s5p_aes_handle_req(dev, req);
}

static int s5p_aes_setkey(struct crypto_skcipher *cipher,
			  const u8 *key, unsigned int keylen)
{
	struct crypto_tfm *tfm = crypto_skcipher_tfm(cipher);
	struct s5p_aes_ctx *ctx = crypto_tfm_ctx(tfm);

	if (keylen != AES_KEYSIZE_128 &&
	    keylen != AES_KEYSIZE_192 &&
	    keylen != AES_KEYSIZE_256)
		return -EINVAL;

	memcpy(ctx->aes_key, key, keylen);
	ctx->keylen = keylen;

	return 0;
}

static int s5p_aes_ecb_encrypt(struct skcipher_request *req)
{
	return s5p_aes_crypt(req, 0);
}

static int s5p_aes_ecb_decrypt(struct skcipher_request *req)
{
	return s5p_aes_crypt(req, FLAGS_AES_DECRYPT);
}

static int s5p_aes_cbc_encrypt(struct skcipher_request *req)
{
	return s5p_aes_crypt(req, FLAGS_AES_CBC);
}

static int s5p_aes_cbc_decrypt(struct skcipher_request *req)
{
	return s5p_aes_crypt(req, FLAGS_AES_DECRYPT | FLAGS_AES_CBC);
}

static int s5p_aes_ctr_crypt(struct skcipher_request *req)
{
	return s5p_aes_crypt(req, FLAGS_AES_CTR);
}

static int s5p_aes_init_tfm(struct crypto_skcipher *tfm)
{
	struct s5p_aes_ctx *ctx = crypto_skcipher_ctx(tfm);

	ctx->dev = s5p_dev;
	crypto_skcipher_set_reqsize(tfm, sizeof(struct s5p_aes_reqctx));

	return 0;
}

static struct skcipher_alg algs[] = {
	{
		.base.cra_name = "ecb(aes)",
		.base.cra_driver_name = "ecb-aes-s5p",
		.base.cra_priority = 100,
		.base.cra_flags = CRYPTO_ALG_ASYNC |
				  CRYPTO_ALG_KERN_DRIVER_ONLY,
		.base.cra_blocksize = AES_BLOCK_SIZE,
		.base.cra_ctxsize = sizeof(struct s5p_aes_ctx),
		.base.cra_alignmask = 0x0f,
		.base.cra_module = THIS_MODULE,

		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.setkey = s5p_aes_setkey,
		.encrypt = s5p_aes_ecb_encrypt,
		.decrypt = s5p_aes_ecb_decrypt,
		.init = s5p_aes_init_tfm,
	},
	{
		.base.cra_name = "cbc(aes)",
		.base.cra_driver_name = "cbc-aes-s5p",
		.base.cra_priority = 100,
		.base.cra_flags = CRYPTO_ALG_ASYNC |
				  CRYPTO_ALG_KERN_DRIVER_ONLY,
		.base.cra_blocksize = AES_BLOCK_SIZE,
		.base.cra_ctxsize = sizeof(struct s5p_aes_ctx),
		.base.cra_alignmask = 0x0f,
		.base.cra_module = THIS_MODULE,

		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.setkey = s5p_aes_setkey,
		.encrypt = s5p_aes_cbc_encrypt,
		.decrypt = s5p_aes_cbc_decrypt,
		.init = s5p_aes_init_tfm,
	},
	{
		.base.cra_name = "ctr(aes)",
		.base.cra_driver_name = "ctr-aes-s5p",
		.base.cra_priority = 100,
		.base.cra_flags = CRYPTO_ALG_ASYNC |
				  CRYPTO_ALG_KERN_DRIVER_ONLY,
		.base.cra_blocksize = 1,
		.base.cra_ctxsize = sizeof(struct s5p_aes_ctx),
		.base.cra_alignmask = 0x0f,
		.base.cra_module = THIS_MODULE,

		.min_keysize = AES_MIN_KEY_SIZE,
		.max_keysize = AES_MAX_KEY_SIZE,
		.ivsize = AES_BLOCK_SIZE,
		.setkey = s5p_aes_setkey,
		.encrypt = s5p_aes_ctr_crypt,
		.decrypt = s5p_aes_ctr_crypt,
		.init = s5p_aes_init_tfm,
	},
};
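
/*
 * Illustrative sketch (not part of the driver): how a kernel user would
 * reach s5p_aes_crypt() above through the generic skcipher API. Error
 * handling is trimmed and all names are hypothetical.
 *
 *	struct crypto_skcipher *tfm = crypto_alloc_skcipher("cbc(aes)", 0, 0);
 *	struct skcipher_request *req = skcipher_request_alloc(tfm, GFP_KERNEL);
 *
 *	crypto_skcipher_setkey(tfm, key, AES_KEYSIZE_128);
 *	skcipher_request_set_callback(req, CRYPTO_TFM_REQ_MAY_BACKLOG,
 *				      done_cb, &done_data);
 *	skcipher_request_set_crypt(req, src_sg, dst_sg, len, iv);
 *	crypto_skcipher_encrypt(req);	// may return -EINPROGRESS
 */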

static int s5p_aes_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	int i, j, err = -ENODEV;
	const struct samsung_aes_variant *variant;
	struct s5p_aes_dev *pdata;
	struct resource *res;
	unsigned int hash_i;

	if (s5p_dev)
		return -EEXIST;

	pdata = devm_kzalloc(dev, sizeof(*pdata), GFP_KERNEL);
	if (!pdata)
		return -ENOMEM;

	variant = find_s5p_sss_version(pdev);
	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	if (!res)
		return -EINVAL;

	/*
	 * Note: HASH and PRNG use the same registers in secss; avoid
	 * overwriting each other. This drops HASH support when
	 * CONFIG_EXYNOS_RNG is enabled in the config. We need a larger
	 * resource size for the HASH registers in secss; the current one
	 * describes only AES/DES.
	 */
	if (IS_ENABLED(CONFIG_CRYPTO_DEV_EXYNOS_HASH)) {
		if (variant == &exynos_aes_data) {
			res->end += 0x300;
			pdata->use_hash = true;
		}
	}

	pdata->res = res;
	pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(pdata->ioaddr)) {
		if (!pdata->use_hash)
			return PTR_ERR(pdata->ioaddr);
		/* try AES without HASH */
		res->end -= 0x300;
		pdata->use_hash = false;
		pdata->ioaddr = devm_ioremap_resource(&pdev->dev, res);
		if (IS_ERR(pdata->ioaddr))
			return PTR_ERR(pdata->ioaddr);
	}

	pdata->clk = devm_clk_get(dev, variant->clk_names[0]);
	if (IS_ERR(pdata->clk))
		return dev_err_probe(dev, PTR_ERR(pdata->clk),
				     "failed to find secss clock %s\n",
				     variant->clk_names[0]);

	err = clk_prepare_enable(pdata->clk);
	if (err < 0) {
		dev_err(dev, "Enabling clock %s failed, err %d\n",
			variant->clk_names[0], err);
		return err;
	}

	if (variant->clk_names[1]) {
		pdata->pclk = devm_clk_get(dev, variant->clk_names[1]);
		if (IS_ERR(pdata->pclk)) {
			err = dev_err_probe(dev, PTR_ERR(pdata->pclk),
					    "failed to find clock %s\n",
					    variant->clk_names[1]);
			goto err_clk;
		}

		err = clk_prepare_enable(pdata->pclk);
		if (err < 0) {
			dev_err(dev, "Enabling clock %s failed, err %d\n",
				variant->clk_names[1], err);
			goto err_clk;
		}
	} else {
		pdata->pclk = NULL;
	}

	spin_lock_init(&pdata->lock);
	spin_lock_init(&pdata->hash_lock);

	pdata->aes_ioaddr = pdata->ioaddr + variant->aes_offset;
	pdata->io_hash_base = pdata->ioaddr + variant->hash_offset;

	pdata->irq_fc = platform_get_irq(pdev, 0);
	if (pdata->irq_fc < 0) {
		err = pdata->irq_fc;
		dev_warn(dev, "feed control interrupt is not available.\n");
		goto err_irq;
	}
	err = devm_request_threaded_irq(dev, pdata->irq_fc, NULL,
					s5p_aes_interrupt, IRQF_ONESHOT,
					pdev->name, pdev);
	if (err < 0) {
		dev_warn(dev, "feed control interrupt is not available.\n");
		goto err_irq;
	}

	pdata->busy = false;
	pdata->dev = dev;
	platform_set_drvdata(pdev, pdata);
	s5p_dev = pdata;

	tasklet_init(&pdata->tasklet, s5p_tasklet_cb, (unsigned long)pdata);
	crypto_init_queue(&pdata->queue, CRYPTO_QUEUE_LEN);

	for (i = 0; i < ARRAY_SIZE(algs); i++) {
		err = crypto_register_skcipher(&algs[i]);
		if (err)
			goto err_algs;
	}

	if (pdata->use_hash) {
		tasklet_init(&pdata->hash_tasklet, s5p_hash_tasklet_cb,
			     (unsigned long)pdata);
		crypto_init_queue(&pdata->hash_queue, SSS_HASH_QUEUE_LENGTH);

		for (hash_i = 0; hash_i < ARRAY_SIZE(algs_sha1_md5_sha256);
		     hash_i++) {
			struct ahash_alg *alg;

			alg = &algs_sha1_md5_sha256[hash_i];
			err = crypto_register_ahash(alg);
			if (err) {
				dev_err(dev, "can't register '%s': %d\n",
					alg->halg.base.cra_driver_name, err);
				goto err_hash;
			}
		}
	}

	dev_info(dev, "s5p-sss driver registered\n");

	return 0;

err_hash:
	for (j = hash_i - 1; j >= 0; j--)
		crypto_unregister_ahash(&algs_sha1_md5_sha256[j]);

	tasklet_kill(&pdata->hash_tasklet);
	res->end -= 0x300;

err_algs:
	if (i < ARRAY_SIZE(algs))
		dev_err(dev, "can't register '%s': %d\n", algs[i].base.cra_name,
			err);

	for (j = 0; j < i; j++)
		crypto_unregister_skcipher(&algs[j]);

	tasklet_kill(&pdata->tasklet);

err_irq:
	clk_disable_unprepare(pdata->pclk);

err_clk:
	clk_disable_unprepare(pdata->clk);
	s5p_dev = NULL;

	return err;
}

static int s5p_aes_remove(struct platform_device *pdev)
{
	struct s5p_aes_dev *pdata = platform_get_drvdata(pdev);
	int i;

	if (!pdata)
		return -ENODEV;

	for (i = 0; i < ARRAY_SIZE(algs); i++)
		crypto_unregister_skcipher(&algs[i]);

	tasklet_kill(&pdata->tasklet);
	if (pdata->use_hash) {
		for (i = ARRAY_SIZE(algs_sha1_md5_sha256) - 1; i >= 0; i--)
			crypto_unregister_ahash(&algs_sha1_md5_sha256[i]);

		pdata->res->end -= 0x300;
		tasklet_kill(&pdata->hash_tasklet);
		pdata->use_hash = false;
	}

	clk_disable_unprepare(pdata->pclk);

	clk_disable_unprepare(pdata->clk);
	s5p_dev = NULL;

	return 0;
}

static struct platform_driver s5p_aes_crypto = {
	.probe = s5p_aes_probe,
	.remove = s5p_aes_remove,
	.driver = {
		.name = "s5p-secss",
		.of_match_table = s5p_sss_dt_match,
	},
};

module_platform_driver(s5p_aes_crypto);

MODULE_DESCRIPTION("S5PV210 AES hw acceleration support.");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Vladimir Zapolskiy <vzapolskiy@gmail.com>");
MODULE_AUTHOR("Kamil Konieczny <k.konieczny@partner.samsung.com>");