// SPDX-License-Identifier: GPL-2.0
/*
 * Hash acceleration support for Rockchip Crypto v3
 *
 * Copyright (c) 2022, Rockchip Electronics Co., Ltd
 *
 * Author: Lin Jinhan <troy.lin@rock-chips.com>
 *
 */

#include <linux/slab.h>
#include <linux/iopoll.h>

#include "rk_crypto_core.h"
#include "rk_crypto_v3.h"
#include "rk_crypto_v3_reg.h"
#include "rk_crypto_ahash_utils.h"
#include "rk_crypto_utils.h"

#define RK_HASH_CTX_MAGIC	0x1A1A1A1A
#define RK_POLL_PERIOD_US	100
#define RK_POLL_TIMEOUT_US	50000

struct rk_ahash_expt_ctx {
	struct rk_ahash_ctx ctx;
	u8 lastc[RK_DMA_ALIGNMENT];
};
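
/*
 * Snapshot of the hash engine's intermediate state: the running digest
 * words from CRYPTO_HASH_MID_DATA_* plus the CRYPTO_HASH_CTL setting,
 * saved after each DMA pass and restored before processing resumes.
 */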
struct rk_hash_mid_data {
	u32 valid_flag;
	u32 hash_ctl;
	u32 data[CRYPTO_HASH_MID_WORD_SIZE];
};

static const u32 hash_algo2bc[] = {
	[HASH_ALGO_MD5] = CRYPTO_MD5,
	[HASH_ALGO_SHA1] = CRYPTO_SHA1,
	[HASH_ALGO_SHA224] = CRYPTO_SHA224,
	[HASH_ALGO_SHA256] = CRYPTO_SHA256,
	[HASH_ALGO_SHA384] = CRYPTO_SHA384,
	[HASH_ALGO_SHA512] = CRYPTO_SHA512,
	[HASH_ALGO_SM3] = CRYPTO_SM3,
};
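
/*
 * Soft-reset the crypto engine: mask DMA interrupts, assert the software
 * reset bit, wait for the hardware to clear it and then clear HASH_CTL.
 */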
static void rk_hash_reset(struct rk_crypto_dev *rk_dev)
{
	int ret;
	u32 tmp = 0, tmp_mask = 0;
	unsigned int poll_timeout_us = 1000;

	CRYPTO_WRITE(rk_dev, CRYPTO_DMA_INT_EN, 0x00);

	tmp = CRYPTO_SW_CC_RESET;
	tmp_mask = tmp << CRYPTO_WRITE_MASK_SHIFT;

	CRYPTO_WRITE(rk_dev, CRYPTO_RST_CTL, tmp | tmp_mask);

	/* This is usually done in 20 clock cycles */
	ret = read_poll_timeout_atomic(CRYPTO_READ, tmp, !tmp, 0, poll_timeout_us,
				       false, rk_dev, CRYPTO_RST_CTL);
	if (ret)
		dev_err(rk_dev->dev, "cipher reset poll timeout %uus.",
			poll_timeout_us);

	CRYPTO_WRITE(rk_dev, CRYPTO_HASH_CTL, 0xffff0000);
}
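
/*
 * Wait for the intermediate digest to be flagged valid, then save the
 * CRYPTO_HASH_MID_DATA_* words and the current CRYPTO_HASH_CTL value
 * into @mid_data and clear the hash control register.
 */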
static int rk_hash_mid_data_store(struct rk_crypto_dev *rk_dev, struct rk_hash_mid_data *mid_data)
{
	int ret;
	uint32_t reg_ctrl;

	CRYPTO_TRACE();

	ret = read_poll_timeout_atomic(CRYPTO_READ,
				       reg_ctrl,
				       reg_ctrl & CRYPTO_HASH_MID_IS_VALID,
				       0,
				       RK_POLL_TIMEOUT_US,
				       false, rk_dev, CRYPTO_MID_VALID);

	CRYPTO_WRITE(rk_dev, CRYPTO_MID_VALID_SWITCH,
		     CRYPTO_MID_VALID_ENABLE << CRYPTO_WRITE_MASK_SHIFT);
	if (ret) {
		CRYPTO_TRACE("CRYPTO_MID_VALID timeout.");
		goto exit;
	}

	CRYPTO_WRITE(rk_dev, CRYPTO_MID_VALID,
		     CRYPTO_HASH_MID_IS_VALID |
		     CRYPTO_HASH_MID_IS_VALID << CRYPTO_WRITE_MASK_SHIFT);

	rk_crypto_read_regs(rk_dev, CRYPTO_HASH_MID_DATA_0,
			    (u8 *)mid_data->data, sizeof(mid_data->data));

	mid_data->hash_ctl = CRYPTO_READ(rk_dev, CRYPTO_HASH_CTL);
	mid_data->valid_flag = 1;

	CRYPTO_WRITE(rk_dev, CRYPTO_HASH_CTL, 0 | CRYPTO_WRITE_MASK_ALL);

exit:
	return ret;
}
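
/*
 * Re-enable intermediate digest tracking and write a previously saved
 * digest and CRYPTO_HASH_CTL value back to the hardware.  If no valid
 * snapshot exists, just clear the mid-data registers.
 */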
static int rk_hash_mid_data_restore(struct rk_crypto_dev *rk_dev, struct rk_hash_mid_data *mid_data)
{
	CRYPTO_TRACE();

	CRYPTO_WRITE(rk_dev, CRYPTO_MID_VALID_SWITCH,
		     CRYPTO_MID_VALID_ENABLE | CRYPTO_MID_VALID_ENABLE << CRYPTO_WRITE_MASK_SHIFT);

	CRYPTO_WRITE(rk_dev, CRYPTO_MID_VALID,
		     CRYPTO_HASH_MID_IS_VALID |
		     CRYPTO_HASH_MID_IS_VALID << CRYPTO_WRITE_MASK_SHIFT);

	if (!mid_data->valid_flag) {
		CRYPTO_TRACE("clear mid data");
		rk_crypto_clear_regs(rk_dev, CRYPTO_HASH_MID_DATA_0, ARRAY_SIZE(mid_data->data));
		return 0;
	}

	rk_crypto_write_regs(rk_dev, CRYPTO_HASH_MID_DATA_0,
			     (u8 *)mid_data->data, sizeof(mid_data->data));

	CRYPTO_WRITE(rk_dev, CRYPTO_HASH_CTL, mid_data->hash_ctl | CRYPTO_WRITE_MASK_ALL);

	return 0;
}
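
/*
 * DMA interrupt handler: acknowledge the interrupt status and treat
 * anything other than a clean "source item done" as a DMA error,
 * dumping the relevant DMA registers for debugging.
 */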
static int rk_crypto_irq_handle(int irq, void *dev_id)
{
	struct rk_crypto_dev *rk_dev = platform_get_drvdata(dev_id);
	u32 interrupt_status;
	struct rk_hw_crypto_v3_info *hw_info =
		(struct rk_hw_crypto_v3_info *)rk_dev->hw_info;
	struct rk_alg_ctx *alg_ctx = rk_ahash_alg_ctx(rk_dev);

	/* disable crypto irq */
	CRYPTO_WRITE(rk_dev, CRYPTO_DMA_INT_EN, 0);

	interrupt_status = CRYPTO_READ(rk_dev, CRYPTO_DMA_INT_ST);
	CRYPTO_WRITE(rk_dev, CRYPTO_DMA_INT_ST, interrupt_status);

	interrupt_status &= CRYPTO_LOCKSTEP_MASK;

	if (interrupt_status != CRYPTO_SRC_ITEM_DONE_INT_ST) {
		dev_err(rk_dev->dev, "DMA desc = %p\n", hw_info->hw_desc.lli_head);
		dev_err(rk_dev->dev, "DMA addr_in = %08x\n",
			(u32)alg_ctx->addr_in);
		dev_err(rk_dev->dev, "DMA addr_out = %08x\n",
			(u32)alg_ctx->addr_out);
		dev_err(rk_dev->dev, "DMA count = %08x\n", alg_ctx->count);
		dev_err(rk_dev->dev, "DMA desc_dma = %08x\n",
			(u32)hw_info->hw_desc.lli_head_dma);
		dev_err(rk_dev->dev, "DMA Error status = %08x\n",
			interrupt_status);
		dev_err(rk_dev->dev, "DMA CRYPTO_DMA_LLI_ADDR status = %08x\n",
			CRYPTO_READ(rk_dev, CRYPTO_DMA_LLI_ADDR));
		dev_err(rk_dev->dev, "DMA CRYPTO_DMA_ST status = %08x\n",
			CRYPTO_READ(rk_dev, CRYPTO_DMA_ST));
		dev_err(rk_dev->dev, "DMA CRYPTO_DMA_STATE status = %08x\n",
			CRYPTO_READ(rk_dev, CRYPTO_DMA_STATE));
		dev_err(rk_dev->dev, "DMA CRYPTO_DMA_LLI_RADDR status = %08x\n",
			CRYPTO_READ(rk_dev, CRYPTO_DMA_LLI_RADDR));
		dev_err(rk_dev->dev, "DMA CRYPTO_DMA_SRC_RADDR status = %08x\n",
			CRYPTO_READ(rk_dev, CRYPTO_DMA_SRC_RADDR));
		dev_err(rk_dev->dev, "DMA CRYPTO_DMA_DST_RADDR status = %08x\n",
			CRYPTO_READ(rk_dev, CRYPTO_DMA_DST_RADDR));
		rk_dev->err = -EFAULT;
	}

	return 0;
}
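
/*
 * Completion callback for one DMA pass: dump the LLI descriptor on error,
 * save the intermediate digest so a later update can resume, then run the
 * original request's completion.
 */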
static void rk_ahash_crypto_complete(struct crypto_async_request *base, int err)
{
	struct ahash_request *req = ahash_request_cast(base);
	struct crypto_ahash *tfm = crypto_ahash_reqtfm(req);
	struct rk_ahash_ctx *ctx = crypto_ahash_ctx(tfm);
	struct rk_alg_ctx *alg_ctx = rk_ahash_alg_ctx(ctx->rk_dev);

	struct rk_hw_crypto_v3_info *hw_info = ctx->rk_dev->hw_info;
	struct crypto_lli_desc *lli_desc = hw_info->hw_desc.lli_head;

	if (err) {
		rk_hash_reset(ctx->rk_dev);
		pr_err("aligned = %u, align_size = %u\n",
		       alg_ctx->aligned, alg_ctx->align_size);
		pr_err("total = %u, left = %u, count = %u\n",
		       alg_ctx->total, alg_ctx->left_bytes, alg_ctx->count);
		pr_err("lli->src = %08x\n", lli_desc->src_addr);
		pr_err("lli->src_len = %08x\n", lli_desc->src_len);
		pr_err("lli->dst = %08x\n", lli_desc->dst_addr);
		pr_err("lli->dst_len = %08x\n", lli_desc->dst_len);
		pr_err("lli->dma_ctl = %08x\n", lli_desc->dma_ctrl);
		pr_err("lli->usr_def = %08x\n", lli_desc->user_define);
		pr_err("lli->next = %08x\n\n\n", lli_desc->next_addr);
	}

	if (alg_ctx->total)
		rk_hash_mid_data_store(ctx->rk_dev, (struct rk_hash_mid_data *)ctx->priv);

	if (base->complete)
		base->complete(base, err);
}

static inline void clear_hash_out_reg(struct rk_crypto_dev *rk_dev)
{
	rk_crypto_clear_regs(rk_dev, CRYPTO_HASH_DOUT_0, 16);
}

static int write_key_reg(struct rk_crypto_dev *rk_dev, const u8 *key,
			 u32 key_len)
{
	rk_crypto_write_regs(rk_dev, CRYPTO_CH0_KEY_0, key, key_len);

	return 0;
}
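
/*
 * Program the hash engine for @algo: reset the block, clear stale output,
 * select the algorithm with hardware padding (plus HMAC mode when @type
 * asks for it) and invalidate any saved intermediate state.
 */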
static int rk_hw_hash_init(struct rk_crypto_dev *rk_dev, u32 algo, u32 type)
{
	u32 reg_ctrl = 0;
	struct rk_ahash_ctx *ctx = rk_ahash_ctx_cast(rk_dev);
	struct rk_hash_mid_data *mid_data = (struct rk_hash_mid_data *)ctx->priv;

	if (algo >= ARRAY_SIZE(hash_algo2bc))
		goto exit;

	rk_hash_reset(rk_dev);

	clear_hash_out_reg(rk_dev);

	reg_ctrl = hash_algo2bc[algo] | CRYPTO_HW_PAD_ENABLE;

	if (IS_TYPE_HMAC(type)) {
		CRYPTO_TRACE("this is hmac");
		reg_ctrl |= CRYPTO_HMAC_ENABLE;
	}

	CRYPTO_WRITE(rk_dev, CRYPTO_HASH_CTL, reg_ctrl | CRYPTO_WRITE_MASK_ALL);
	CRYPTO_WRITE(rk_dev, CRYPTO_FIFO_CTL, 0x00030003);

	memset(mid_data, 0x00, sizeof(*mid_data));

	return 0;
exit:
	CRYPTO_WRITE(rk_dev, CRYPTO_HASH_CTL, 0 | CRYPTO_WRITE_MASK_ALL);

	return -EINVAL;
}
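
/* Mask DMA interrupts and clear the hash control register. */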
static void clean_hash_setting(struct rk_crypto_dev *rk_dev)
{
	CRYPTO_WRITE(rk_dev, CRYPTO_DMA_INT_EN, 0);
	CRYPTO_WRITE(rk_dev, CRYPTO_HASH_CTL, 0 | CRYPTO_WRITE_MASK_ALL);
}
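
/* Import a previously exported request state; restoring the state is still a TODO. */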
static int rk_ahash_import(struct ahash_request *req, const void *in)
{
	struct rk_ahash_expt_ctx state;

	/* 'in' may not be aligned so memcpy to local variable */
	memcpy(&state, in, sizeof(state));

	/* TODO: deal with import */

	return 0;
}
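
/* Export the current request state; filling in the real state is still a TODO. */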
static int rk_ahash_export(struct ahash_request *req, void *out)
{
	struct rk_ahash_expt_ctx state;

	/* Don't let anything leak to 'out' */
	memset(&state, 0, sizeof(state));

	/* TODO: deal with export */

	memcpy(out, &state, sizeof(state));

	return 0;
}
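
/*
 * Build the LLI descriptor chain for the current chunk and start the DMA
 * transfer.  The first chunk of a request restores any saved intermediate
 * state and starts the hash engine; the final chunk marks the string end
 * so the hardware applies padding and produces the digest.
 */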
static int rk_ahash_dma_start(struct rk_crypto_dev *rk_dev, uint32_t flag)
{
	struct rk_hw_crypto_v3_info *hw_info =
		(struct rk_hw_crypto_v3_info *)rk_dev->hw_info;
	struct rk_alg_ctx *alg_ctx = rk_ahash_alg_ctx(rk_dev);
	struct rk_ahash_ctx *ctx = rk_ahash_ctx_cast(rk_dev);
	struct crypto_lli_desc *lli_head, *lli_tail;
	u32 dma_ctl = CRYPTO_DMA_RESTART;
	bool is_final = flag & RK_FLAG_FINAL;
	int ret;

	CRYPTO_TRACE("ctx->calc_cnt = %u, count %u Byte, is_final = %d",
		     ctx->calc_cnt, alg_ctx->count, is_final);

	if (alg_ctx->count % RK_DMA_ALIGNMENT && !is_final) {
		dev_err(rk_dev->dev, "count = %u is not aligned with [%u]\n",
			alg_ctx->count, RK_DMA_ALIGNMENT);
		return -EINVAL;
	}

	if (alg_ctx->count == 0) {
		/* do nothing */
		CRYPTO_TRACE("empty calc");
		return 0;
	}

	if (alg_ctx->total == alg_ctx->left_bytes + alg_ctx->count)
		rk_hash_mid_data_restore(rk_dev, (struct rk_hash_mid_data *)ctx->priv);

	if (alg_ctx->aligned)
		ret = rk_crypto_hw_desc_init(&hw_info->hw_desc,
					     alg_ctx->sg_src, NULL, alg_ctx->count);
	else
		ret = rk_crypto_hw_desc_init(&hw_info->hw_desc,
					     &alg_ctx->sg_tmp, NULL, alg_ctx->count);
	if (ret)
		return ret;

	lli_head = hw_info->hw_desc.lli_head;
	lli_tail = hw_info->hw_desc.lli_tail;

	lli_tail->dma_ctrl = is_final ? LLI_DMA_CTRL_LAST : LLI_DMA_CTRL_PAUSE;
	lli_tail->dma_ctrl |= LLI_DMA_CTRL_SRC_DONE;

	if (ctx->calc_cnt == 0) {
		dma_ctl = CRYPTO_DMA_START;

		lli_head->user_define |= LLI_USER_CIPHER_START;
		lli_head->user_define |= LLI_USER_STRING_START;

		CRYPTO_WRITE(rk_dev, CRYPTO_DMA_LLI_ADDR, hw_info->hw_desc.lli_head_dma);
		CRYPTO_WRITE(rk_dev, CRYPTO_HASH_CTL,
			     (CRYPTO_HASH_ENABLE << CRYPTO_WRITE_MASK_SHIFT) |
			     CRYPTO_HASH_ENABLE);
	}

	if (is_final && alg_ctx->left_bytes == 0)
		lli_tail->user_define |= LLI_USER_STRING_LAST;

	CRYPTO_TRACE("dma_ctrl = %08x, user_define = %08x, len = %u",
		     lli_head->dma_ctrl, lli_head->user_define, alg_ctx->count);

	rk_crypto_dump_hw_desc(&hw_info->hw_desc);

	dma_wmb();

	/* enable crypto irq */
	CRYPTO_WRITE(rk_dev, CRYPTO_DMA_INT_EN, 0x7f);

	CRYPTO_WRITE(rk_dev, CRYPTO_DMA_CTL, dma_ctl | dma_ctl << CRYPTO_WRITE_MASK_SHIFT);

	return 0;
}
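
/*
 * Wait for the final digest to become valid, copy it out of the
 * CRYPTO_HASH_DOUT_* registers and shut the hash engine down again.
 */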
static int rk_ahash_get_result(struct rk_crypto_dev *rk_dev,
			       uint8_t *data, uint32_t data_len)
{
	int ret = 0;
	u32 reg_ctrl = 0;
	struct rk_ahash_ctx *ctx = rk_ahash_ctx_cast(rk_dev);

	memset(ctx->priv, 0x00, sizeof(struct rk_hash_mid_data));

	ret = read_poll_timeout_atomic(CRYPTO_READ, reg_ctrl,
				       reg_ctrl & CRYPTO_HASH_IS_VALID,
				       RK_POLL_PERIOD_US,
				       RK_POLL_TIMEOUT_US, false,
				       rk_dev, CRYPTO_HASH_VALID);
	if (ret)
		goto exit;

	rk_crypto_read_regs(rk_dev, CRYPTO_HASH_DOUT_0, data, data_len);

	CRYPTO_WRITE(rk_dev, CRYPTO_HASH_VALID, CRYPTO_HASH_IS_VALID);

exit:
	clean_hash_setting(rk_dev);

	return ret;
}
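
/*
 * Per-transform setup: register the hardware callbacks, allocate the
 * bounce page and the intermediate-state buffer, and claim the crypto
 * engine for this algorithm.
 */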
static int rk_cra_hash_init(struct crypto_tfm *tfm)
{
	struct rk_crypto_algt *algt =
		rk_ahash_get_algt(__crypto_ahash_cast(tfm));
	const char *alg_name = crypto_tfm_alg_name(tfm);
	struct rk_ahash_ctx *ctx = crypto_tfm_ctx(tfm);
	struct rk_crypto_dev *rk_dev = algt->rk_dev;
	struct rk_alg_ctx *alg_ctx = &ctx->algs_ctx;

	CRYPTO_TRACE();

	memset(ctx, 0x00, sizeof(*ctx));

	if (!rk_dev->request_crypto)
		return -EFAULT;

	alg_ctx->align_size = RK_DMA_ALIGNMENT;

	alg_ctx->ops.start = rk_ahash_start;
	alg_ctx->ops.update = rk_ahash_crypto_rx;
	alg_ctx->ops.complete = rk_ahash_crypto_complete;
	alg_ctx->ops.irq_handle = rk_crypto_irq_handle;

	alg_ctx->ops.hw_write_key = write_key_reg;
	alg_ctx->ops.hw_init = rk_hw_hash_init;
	alg_ctx->ops.hw_dma_start = rk_ahash_dma_start;
	alg_ctx->ops.hw_get_result = rk_ahash_get_result;

	ctx->rk_dev = rk_dev;
	ctx->hash_tmp = (u8 *)get_zeroed_page(GFP_KERNEL | GFP_DMA32);
	if (!ctx->hash_tmp) {
		dev_err(rk_dev->dev, "Can't get zeroed page for hash tmp.\n");
		return -ENOMEM;
	}

	ctx->priv = kmalloc(sizeof(struct rk_hash_mid_data), GFP_KERNEL);
	if (!ctx->priv) {
		free_page((unsigned long)ctx->hash_tmp);
		return -ENOMEM;
	}

	memset(ctx->priv, 0x00, sizeof(struct rk_hash_mid_data));

	rk_dev->request_crypto(rk_dev, alg_name);

	crypto_ahash_set_reqsize(__crypto_ahash_cast(tfm), sizeof(struct rk_ahash_rctx));

	algt->alg.hash.halg.statesize = sizeof(struct rk_ahash_expt_ctx);

	return 0;
}
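
/* Per-transform teardown: free the buffers allocated in rk_cra_hash_init() and release the engine. */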
static void rk_cra_hash_exit(struct crypto_tfm *tfm)
{
	struct rk_ahash_ctx *ctx = crypto_tfm_ctx(tfm);

	CRYPTO_TRACE();

	if (ctx->hash_tmp)
		free_page((unsigned long)ctx->hash_tmp);

	kfree(ctx->priv);

	ctx->rk_dev->release_crypto(ctx->rk_dev, crypto_tfm_alg_name(tfm));
}

struct rk_crypto_algt rk_v3_ahash_md5 = RK_HASH_ALGO_INIT(MD5, md5);
struct rk_crypto_algt rk_v3_ahash_sha1 = RK_HASH_ALGO_INIT(SHA1, sha1);
struct rk_crypto_algt rk_v3_ahash_sha224 = RK_HASH_ALGO_INIT(SHA224, sha224);
struct rk_crypto_algt rk_v3_ahash_sha256 = RK_HASH_ALGO_INIT(SHA256, sha256);
struct rk_crypto_algt rk_v3_ahash_sha384 = RK_HASH_ALGO_INIT(SHA384, sha384);
struct rk_crypto_algt rk_v3_ahash_sha512 = RK_HASH_ALGO_INIT(SHA512, sha512);
struct rk_crypto_algt rk_v3_ahash_sm3 = RK_HASH_ALGO_INIT(SM3, sm3);

struct rk_crypto_algt rk_v3_hmac_md5 = RK_HMAC_ALGO_INIT(MD5, md5);
struct rk_crypto_algt rk_v3_hmac_sha1 = RK_HMAC_ALGO_INIT(SHA1, sha1);
struct rk_crypto_algt rk_v3_hmac_sha256 = RK_HMAC_ALGO_INIT(SHA256, sha256);
struct rk_crypto_algt rk_v3_hmac_sha512 = RK_HMAC_ALGO_INIT(SHA512, sha512);
struct rk_crypto_algt rk_v3_hmac_sm3 = RK_HMAC_ALGO_INIT(SM3, sm3);