// SPDX-License-Identifier: GPL-2.0
/*
 * Hash acceleration support for Rockchip Crypto v2
 *
 * Copyright (c) 2020, Rockchip Electronics Co., Ltd
 *
 * Author: Lin Jinhan <troy.lin@rock-chips.com>
 *
 * Some ideas are from marvell/cesa.c and s5p-sss.c driver.
 */

#include <linux/slab.h>
#include <linux/iopoll.h>

#include "rk_crypto_core.h"
#include "rk_crypto_v2.h"
#include "rk_crypto_v2_reg.h"
#include "rk_crypto_ahash_utils.h"
#include "rk_crypto_utils.h"

#define RK_HASH_CTX_MAGIC	0x1A1A1A1A
#define RK_POLL_PERIOD_US	100
#define RK_POLL_TIMEOUT_US	50000

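/*
 * State blob handed to/from ahash export/import: a copy of the transform
 * context plus a buffer for the trailing bytes of the last update.
 */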
struct rk_ahash_expt_ctx {
	struct rk_ahash_ctx ctx;
	u8 lastc[RK_DMA_ALIGNMENT];
};

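/* Map generic hash algorithm IDs to the CRYPTO_HASH_CTL algorithm bits. */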
static const u32 hash_algo2bc[] = {
	[HASH_ALGO_MD5]    = CRYPTO_MD5,
	[HASH_ALGO_SHA1]   = CRYPTO_SHA1,
	[HASH_ALGO_SHA224] = CRYPTO_SHA224,
	[HASH_ALGO_SHA256] = CRYPTO_SHA256,
	[HASH_ALGO_SHA384] = CRYPTO_SHA384,
	[HASH_ALGO_SHA512] = CRYPTO_SHA512,
	[HASH_ALGO_SM3]    = CRYPTO_SM3,
};

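/*
 * Soft-reset the crypto core and clear CRYPTO_HASH_CTL so the hash engine is
 * back in a known idle state; used before every new hash and on errors.
 */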
static void rk_hash_reset(struct rk_crypto_dev *rk_dev)
{
	int ret;
	u32 tmp = 0, tmp_mask = 0;
	unsigned int poll_timeout_us = 1000;

	CRYPTO_WRITE(rk_dev, CRYPTO_DMA_INT_EN, 0x00);

	tmp = CRYPTO_SW_CC_RESET;
	tmp_mask = tmp << CRYPTO_WRITE_MASK_SHIFT;

	CRYPTO_WRITE(rk_dev, CRYPTO_RST_CTL, tmp | tmp_mask);

	/* This is usually done in 20 clock cycles */
	ret = read_poll_timeout_atomic(CRYPTO_READ, tmp, !tmp, 0, poll_timeout_us,
				       false, rk_dev, CRYPTO_RST_CTL);
	if (ret)
		dev_err(rk_dev->dev, "cipher reset poll timeout %uus.",
			poll_timeout_us);

	CRYPTO_WRITE(rk_dev, CRYPTO_HASH_CTL, 0xffff0000);
}

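/*
 * DMA interrupt handler: mask and acknowledge the interrupt, then record an
 * error and dump the DMA/LLI state if anything other than "src item done"
 * was signalled.
 */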
static int rk_crypto_irq_handle(int irq, void *dev_id)
{
	struct rk_crypto_dev *rk_dev = platform_get_drvdata(dev_id);
	u32 interrupt_status;
	struct rk_hw_crypto_v2_info *hw_info =
		(struct rk_hw_crypto_v2_info *)rk_dev->hw_info;
	struct rk_alg_ctx *alg_ctx = rk_ahash_alg_ctx(rk_dev);

	/* disable crypto irq */
	CRYPTO_WRITE(rk_dev, CRYPTO_DMA_INT_EN, 0);

	interrupt_status = CRYPTO_READ(rk_dev, CRYPTO_DMA_INT_ST);
	CRYPTO_WRITE(rk_dev, CRYPTO_DMA_INT_ST, interrupt_status);

	interrupt_status &= CRYPTO_LOCKSTEP_MASK;

	if (interrupt_status != CRYPTO_SRC_ITEM_DONE_INT_ST) {
		dev_err(rk_dev->dev, "DMA desc = %p\n", hw_info->hw_desc.lli_head);
		dev_err(rk_dev->dev, "DMA addr_in = %08x\n",
			(u32)alg_ctx->addr_in);
		dev_err(rk_dev->dev, "DMA addr_out = %08x\n",
			(u32)alg_ctx->addr_out);
		dev_err(rk_dev->dev, "DMA count = %08x\n", alg_ctx->count);
		dev_err(rk_dev->dev, "DMA desc_dma = %08x\n",
			(u32)hw_info->hw_desc.lli_head_dma);
		dev_err(rk_dev->dev, "DMA Error status = %08x\n",
			interrupt_status);
		dev_err(rk_dev->dev, "DMA CRYPTO_DMA_LLI_ADDR status = %08x\n",
			CRYPTO_READ(rk_dev, CRYPTO_DMA_LLI_ADDR));
		dev_err(rk_dev->dev, "DMA CRYPTO_DMA_ST status = %08x\n",
			CRYPTO_READ(rk_dev, CRYPTO_DMA_ST));
		dev_err(rk_dev->dev, "DMA CRYPTO_DMA_STATE status = %08x\n",
			CRYPTO_READ(rk_dev, CRYPTO_DMA_STATE));
		dev_err(rk_dev->dev, "DMA CRYPTO_DMA_LLI_RADDR status = %08x\n",
			CRYPTO_READ(rk_dev, CRYPTO_DMA_LLI_RADDR));
		dev_err(rk_dev->dev, "DMA CRYPTO_DMA_SRC_RADDR status = %08x\n",
			CRYPTO_READ(rk_dev, CRYPTO_DMA_SRC_RADDR));
		dev_err(rk_dev->dev, "DMA CRYPTO_DMA_DST_RADDR status = %08x\n",
			CRYPTO_READ(rk_dev, CRYPTO_DMA_DST_RADDR));
		rk_dev->err = -EFAULT;
	}

	return 0;
}

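/*
 * Completion callback: on error, reset the engine and dump the head LLI
 * descriptor for debugging before completing the original request.
 */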
static void rk_ahash_crypto_complete(struct crypto_async_request *base, int err)
{
	struct ahash_request *req = ahash_request_cast(base);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct rk_alg_ctx *alg_ctx = rk_ahash_alg_ctx(ctx->rk_dev);

	struct rk_hw_crypto_v2_info *hw_info = ctx->rk_dev->hw_info;
	struct crypto_lli_desc *lli_desc = hw_info->hw_desc.lli_head;

	if (err) {
		rk_hash_reset(ctx->rk_dev);
		pr_err("aligned = %u, align_size = %u\n",
		       alg_ctx->aligned, alg_ctx->align_size);
		pr_err("total = %u, left = %u, count = %u\n",
		       alg_ctx->total, alg_ctx->left_bytes, alg_ctx->count);
		pr_err("lli->src = %08x\n", lli_desc->src_addr);
		pr_err("lli->src_len = %08x\n", lli_desc->src_len);
		pr_err("lli->dst = %08x\n", lli_desc->dst_addr);
		pr_err("lli->dst_len = %08x\n", lli_desc->dst_len);
		pr_err("lli->dma_ctl = %08x\n", lli_desc->dma_ctrl);
		pr_err("lli->usr_def = %08x\n", lli_desc->user_define);
		pr_err("lli->next = %08x\n\n\n", lli_desc->next_addr);
	}

	if (base->complete)
		base->complete(base, err);
}

static inline void clear_hash_out_reg(struct rk_crypto_dev *rk_dev)
{
	rk_crypto_clear_regs(rk_dev, CRYPTO_HASH_DOUT_0, 16);
}

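/* Load the (HMAC) key into the channel 0 key registers. */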
static int write_key_reg(struct rk_crypto_dev *rk_dev, const u8 *key,
			 u32 key_len)
{
	rk_crypto_write_regs(rk_dev, CRYPTO_CH0_KEY_0, key, key_len);

	return 0;
}

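/*
 * Program CRYPTO_HASH_CTL for a new digest: select the algorithm, enable
 * hardware padding and, for HMAC transforms, the HMAC engine.
 */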
static int rk_hw_hash_init(struct rk_crypto_dev *rk_dev, u32 algo, u32 type)
{
	u32 reg_ctrl = 0;

	if (algo >= ARRAY_SIZE(hash_algo2bc))
		goto exit;

	rk_hash_reset(rk_dev);

	clear_hash_out_reg(rk_dev);

	reg_ctrl = hash_algo2bc[algo] | CRYPTO_HW_PAD_ENABLE;

	if (IS_TYPE_HMAC(type)) {
		CRYPTO_TRACE("this is hmac");
		reg_ctrl |= CRYPTO_HMAC_ENABLE;
	}

	CRYPTO_WRITE(rk_dev, CRYPTO_HASH_CTL, reg_ctrl | CRYPTO_WRITE_MASK_ALL);
	CRYPTO_WRITE(rk_dev, CRYPTO_FIFO_CTL, 0x00030003);

	return 0;
exit:
	CRYPTO_WRITE(rk_dev, CRYPTO_HASH_CTL, 0 | CRYPTO_WRITE_MASK_ALL);

	return -EINVAL;
}

static void clean_hash_setting(struct rk_crypto_dev *rk_dev)
{
	CRYPTO_WRITE(rk_dev, CRYPTO_DMA_INT_EN, 0);
	CRYPTO_WRITE(rk_dev, CRYPTO_HASH_CTL, 0 | CRYPTO_WRITE_MASK_ALL);
}

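/*
 * export/import are still stubs: export hands back a zeroed state blob and
 * import only copies it into a local buffer, so a partially computed hash
 * cannot yet be migrated (see the TODOs below).
 */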
static int rk_ahash_import(struct ahash_request *req, const void *in)
{
	struct rk_ahash_expt_ctx state;

	/* 'in' may not be aligned so memcpy to local variable */
	memcpy(&state, in, sizeof(state));

	/* TODO: deal with import */

	return 0;
}

static int rk_ahash_export(struct ahash_request *req, void *out)
{
	struct rk_ahash_expt_ctx state;

	/* Don't let anything leak to 'out' */
	memset(&state, 0, sizeof(state));

	/* TODO: deal with export */

	memcpy(out, &state, sizeof(state));

	return 0;
}

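/*
 * Build the LLI descriptor chain for the current chunk and kick the DMA
 * engine. The first chunk starts the hash (CIPHER/STRING start flags plus
 * CRYPTO_DMA_START); the last chunk of the message is tagged with
 * LLI_USER_STRING_LAST so the hardware pads and finalizes the digest.
 * Intermediate chunks must be a multiple of RK_DMA_ALIGNMENT bytes.
 */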
static int rk_ahash_dma_start(struct rk_crypto_dev *rk_dev, uint32_t flag)
{
	struct rk_hw_crypto_v2_info *hw_info =
		(struct rk_hw_crypto_v2_info *)rk_dev->hw_info;
	struct rk_alg_ctx *alg_ctx = rk_ahash_alg_ctx(rk_dev);
	struct rk_ahash_ctx *ctx = rk_ahash_ctx_cast(rk_dev);
	struct crypto_lli_desc *lli_head, *lli_tail;
	u32 dma_ctl = CRYPTO_DMA_RESTART;
	bool is_final = flag & RK_FLAG_FINAL;
	int ret;

	CRYPTO_TRACE("ctx->calc_cnt = %u, count %u Byte, is_final = %d",
		     ctx->calc_cnt, alg_ctx->count, is_final);

	if (alg_ctx->count % RK_DMA_ALIGNMENT && !is_final) {
		dev_err(rk_dev->dev, "count = %u is not aligned with [%u]\n",
			alg_ctx->count, RK_DMA_ALIGNMENT);
		return -EINVAL;
	}

	if (alg_ctx->count == 0) {
		/* do nothing */
		CRYPTO_TRACE("empty calc");
		return 0;
	}

	if (alg_ctx->aligned)
		ret = rk_crypto_hw_desc_init(&hw_info->hw_desc,
					     alg_ctx->sg_src, NULL, alg_ctx->count);
	else
		ret = rk_crypto_hw_desc_init(&hw_info->hw_desc,
					     &alg_ctx->sg_tmp, NULL, alg_ctx->count);
	if (ret)
		return ret;

	lli_head = hw_info->hw_desc.lli_head;
	lli_tail = hw_info->hw_desc.lli_tail;

	lli_tail->dma_ctrl = is_final ? LLI_DMA_CTRL_LAST : LLI_DMA_CTRL_PAUSE;
	lli_tail->dma_ctrl |= LLI_DMA_CTRL_SRC_DONE;
	lli_tail->next_addr = hw_info->hw_desc.lli_head_dma;

	if (ctx->calc_cnt == 0) {
		dma_ctl = CRYPTO_DMA_START;

		lli_head->user_define |= LLI_USER_CIPHER_START;
		lli_head->user_define |= LLI_USER_STRING_START;

		CRYPTO_WRITE(rk_dev, CRYPTO_DMA_LLI_ADDR, hw_info->hw_desc.lli_head_dma);
		CRYPTO_WRITE(rk_dev, CRYPTO_HASH_CTL,
			     (CRYPTO_HASH_ENABLE << CRYPTO_WRITE_MASK_SHIFT) |
			     CRYPTO_HASH_ENABLE);
	}

	if (is_final && alg_ctx->left_bytes == 0)
		lli_tail->user_define |= LLI_USER_STRING_LAST;

	CRYPTO_TRACE("dma_ctrl = %08x, user_define = %08x, len = %u",
		     lli_head->dma_ctrl, lli_head->user_define, alg_ctx->count);

	rk_crypto_dump_hw_desc(&hw_info->hw_desc);

	dma_wmb();

	/* enable crypto irq */
	CRYPTO_WRITE(rk_dev, CRYPTO_DMA_INT_EN, 0x7f);

	CRYPTO_WRITE(rk_dev, CRYPTO_DMA_CTL, dma_ctl | dma_ctl << CRYPTO_WRITE_MASK_SHIFT);

	return 0;
}

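/*
 * Poll until the hardware flags the digest as valid, then read it from the
 * CRYPTO_HASH_DOUT registers and clear the hash configuration.
 */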
static int rk_ahash_get_result(struct rk_crypto_dev *rk_dev,
			       uint8_t *data, uint32_t data_len)
{
	int ret = 0;
	u32 reg_ctrl = 0;

	ret = read_poll_timeout_atomic(CRYPTO_READ, reg_ctrl,
				       reg_ctrl & CRYPTO_HASH_IS_VALID,
				       RK_POLL_PERIOD_US,
				       RK_POLL_TIMEOUT_US, false,
				       rk_dev, CRYPTO_HASH_VALID);
	if (ret)
		goto exit;

	rk_crypto_read_regs(rk_dev, CRYPTO_HASH_DOUT_0, data, data_len);

	CRYPTO_WRITE(rk_dev, CRYPTO_HASH_VALID, CRYPTO_HASH_IS_VALID);

exit:
	clean_hash_setting(rk_dev);

	return ret;
}

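/*
 * Per-transform setup: wire up the hardware ops, allocate the hash_tmp
 * scratch page and claim the crypto engine for this algorithm.
 */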
static int rk_cra_hash_init(struct crypto_tfm *tfm)
{
	struct rk_crypto_algt *algt =
		rk_ahash_get_algt(__crypto_ahash_cast(tfm));
	const char *alg_name = crypto_tfm_alg_name(tfm);
	struct rk_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct rk_crypto_dev *rk_dev = algt->rk_dev;
	struct rk_alg_ctx *alg_ctx = &ctx->algs_ctx;

	CRYPTO_TRACE();

	memset(ctx, 0x00, sizeof(*ctx));

	if (!rk_dev->request_crypto)
		return -EFAULT;

	alg_ctx->align_size = RK_DMA_ALIGNMENT;

	alg_ctx->ops.start = rk_ahash_start;
	alg_ctx->ops.update = rk_ahash_crypto_rx;
	alg_ctx->ops.complete = rk_ahash_crypto_complete;
	alg_ctx->ops.irq_handle = rk_crypto_irq_handle;

	alg_ctx->ops.hw_write_key = write_key_reg;
	alg_ctx->ops.hw_init = rk_hw_hash_init;
	alg_ctx->ops.hw_dma_start = rk_ahash_dma_start;
	alg_ctx->ops.hw_get_result = rk_ahash_get_result;

	ctx->rk_dev = rk_dev;
	ctx->hash_tmp = (u8 *)get_zeroed_page(GFP_KERNEL | GFP_DMA32);
	if (!ctx->hash_tmp) {
		dev_err(rk_dev->dev, "Can't get zeroed page for hash tmp.\n");
		return -ENOMEM;
	}

	rk_dev->request_crypto(rk_dev, alg_name);

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), sizeof(struct rk_ahash_rctx));

	algt->alg.hash.halg.statesize = sizeof(struct rk_ahash_expt_ctx);

	return 0;
}

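/* Per-transform teardown: free the scratch page and release the crypto engine. */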
static void rk_cra_hash_exit(struct crypto_tfm *tfm)
{
	struct rk_ahash_ctx *ctx = crypto_tfm_ctx(tfm);

	CRYPTO_TRACE();

	if (ctx->hash_tmp)
		free_page((unsigned long)ctx->hash_tmp);

	ctx->rk_dev->release_crypto(ctx->rk_dev, crypto_tfm_alg_name(tfm));
}

struct rk_crypto_algt rk_v2_ahash_md5 = RK_HASH_ALGO_INIT(MD5, md5);
struct rk_crypto_algt rk_v2_ahash_sha1 = RK_HASH_ALGO_INIT(SHA1, sha1);
struct rk_crypto_algt rk_v2_ahash_sha224 = RK_HASH_ALGO_INIT(SHA224, sha224);
struct rk_crypto_algt rk_v2_ahash_sha256 = RK_HASH_ALGO_INIT(SHA256, sha256);
struct rk_crypto_algt rk_v2_ahash_sha384 = RK_HASH_ALGO_INIT(SHA384, sha384);
struct rk_crypto_algt rk_v2_ahash_sha512 = RK_HASH_ALGO_INIT(SHA512, sha512);
struct rk_crypto_algt rk_v2_ahash_sm3 = RK_HASH_ALGO_INIT(SM3, sm3);

struct rk_crypto_algt rk_v2_hmac_md5 = RK_HMAC_ALGO_INIT(MD5, md5);
struct rk_crypto_algt rk_v2_hmac_sha1 = RK_HMAC_ALGO_INIT(SHA1, sha1);
struct rk_crypto_algt rk_v2_hmac_sha256 = RK_HMAC_ALGO_INIT(SHA256, sha256);
struct rk_crypto_algt rk_v2_hmac_sha512 = RK_HMAC_ALGO_INIT(SHA512, sha512);
struct rk_crypto_algt rk_v2_hmac_sm3 = RK_HMAC_ALGO_INIT(SM3, sm3);