// SPDX-License-Identifier: GPL-2.0
/*
 * Rockchip crypto utils
 *
 * Copyright (c) 2022, Rockchip Electronics Co., Ltd
 *
 * Author: Lin Jinhan <troy.lin@rock-chips.com>
 *
 */

#include <crypto/scatterwalk.h>
#include <linux/scatterlist.h>

#include "rk_crypto_core.h"
#include "rk_crypto_utils.h"

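/* Store a 32-bit word into a byte buffer in big-endian order. */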
static inline void word2byte_be(u32 word, u8 *ch)
{
	ch[0] = (word >> 24) & 0xff;
	ch[1] = (word >> 16) & 0xff;
	ch[2] = (word >> 8) & 0xff;
	ch[3] = (word >> 0) & 0xff;
}

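/* Load four bytes as a big-endian 32-bit word. */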
static inline u32 byte2word_be(const u8 *ch)
{
	return (*ch << 24) + (*(ch + 1) << 16) +
	       (*(ch + 2) << 8) + *(ch + 3);
}

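/*
 * Write a byte buffer into a contiguous range of 32-bit registers,
 * one big-endian word at a time. A trailing partial word is
 * zero-padded before it is written.
 */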
void rk_crypto_write_regs(struct rk_crypto_dev *rk_dev, u32 base_addr, const u8 *data, u32 bytes)
{
	u32 i;
	u8 tmp_buf[4];

	for (i = 0; i < bytes / 4; i++, base_addr += 4)
		CRYPTO_WRITE(rk_dev, base_addr, byte2word_be(data + i * 4));

	if (bytes % 4) {
		memset(tmp_buf, 0x00, sizeof(tmp_buf));
		memcpy(tmp_buf, data + (bytes / 4) * 4, bytes % 4);
		CRYPTO_WRITE(rk_dev, base_addr, byte2word_be(tmp_buf));
	}
}

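/* Zero a contiguous range of 32-bit registers. */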
void rk_crypto_clear_regs(struct rk_crypto_dev *rk_dev, u32 base_addr, u32 words)
{
	u32 i;

	for (i = 0; i < words; i++, base_addr += 4)
		CRYPTO_WRITE(rk_dev, base_addr, 0);
}

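/*
 * Read a contiguous range of 32-bit registers into a byte buffer in
 * big-endian order. For a trailing partial word, only the requested
 * number of bytes is copied out.
 */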
void rk_crypto_read_regs(struct rk_crypto_dev *rk_dev, u32 base_addr, u8 *data, u32 bytes)
{
	u32 i;

	for (i = 0; i < bytes / 4; i++, base_addr += 4)
		word2byte_be(CRYPTO_READ(rk_dev, base_addr), data + i * 4);

	if (bytes % 4) {
		u8 tmp_buf[4];

		word2byte_be(CRYPTO_READ(rk_dev, base_addr), tmp_buf);
		memcpy(data + i * 4, tmp_buf, bytes % 4);
	}
}

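/*
 * Check that one src/dst scatterlist entry pair is usable for DMA:
 * offset 4-byte aligned, length aligned to align_mask (used here as
 * an alignment value, not a mask), physical address below 4 GiB to
 * fit the engine's 32-bit descriptor fields, and, when a destination
 * is given, matching src/dst lengths.
 */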
static int check_scatter_align(struct scatterlist *sg_src,
			       struct scatterlist *sg_dst,
			       int align_mask)
{
	int in, out, align;

	in = IS_ALIGNED((u32)sg_src->offset, 4) &&
	     IS_ALIGNED((u32)sg_src->length, align_mask) &&
	     (sg_phys(sg_src) < SZ_4G);
	if (!sg_dst)
		return in;

	out = IS_ALIGNED((u32)sg_dst->offset, 4) &&
	      IS_ALIGNED((u32)sg_dst->length, align_mask) &&
	      (sg_phys(sg_dst) < SZ_4G);
	align = in && out;

	return (align && (sg_src->length == sg_dst->length));
}

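/*
 * Walk the src (and optional dst) scatterlists and verify every entry
 * satisfies check_scatter_align(). dst_sg may be NULL for src-only
 * operations; otherwise the entry counts must match.
 */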
bool rk_crypto_check_align(struct scatterlist *src_sg, size_t src_nents,
			   struct scatterlist *dst_sg, size_t dst_nents,
			   int align_mask)
{
	struct scatterlist *src_tmp = NULL;
	struct scatterlist *dst_tmp = NULL;
	unsigned int i;

	if (dst_sg && src_nents != dst_nents)
		return false;

	src_tmp = src_sg;
	dst_tmp = dst_sg;

	for (i = 0; i < src_nents; i++) {
		if (!src_tmp)
			return false;

		if (!check_scatter_align(src_tmp, dst_tmp, align_mask))
			return false;

		src_tmp = sg_next(src_tmp);

		if (dst_sg)
			dst_tmp = sg_next(dst_tmp);
	}

	return true;
}

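/*
 * Check that every non-empty entry in an already-mapped scatterlist
 * has a valid DMA address, as expected for a successfully mapped
 * dma-buf fd.
 */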
bool rk_crypto_check_dmafd(struct scatterlist *sgl, size_t nents)
{
	struct scatterlist *src_tmp = NULL;
	unsigned int i;

	for_each_sg(sgl, src_tmp, nents, i) {
		if (!src_tmp)
			return false;

		if (src_tmp->length && !sg_dma_address(src_tmp))
			return false;
	}

	return true;
}

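/* Dump the descriptor chain from lli_head to lli_tail for debugging. */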
void rk_crypto_dump_hw_desc(struct rk_hw_desc *hw_desc)
{
	struct crypto_lli_desc *cur_lli = NULL;
	u32 i;

	cur_lli = hw_desc->lli_head;

	CRYPTO_TRACE("lli_head = %lx, lli_tail = %lx",
		     (unsigned long)hw_desc->lli_head, (unsigned long)hw_desc->lli_tail);

	for (i = 0; i < hw_desc->total; i++, cur_lli++) {
		CRYPTO_TRACE("cur_lli = %lx", (unsigned long)cur_lli);
		CRYPTO_TRACE("src_addr = %08x", cur_lli->src_addr);
		CRYPTO_TRACE("src_len = %08x", cur_lli->src_len);
		CRYPTO_TRACE("dst_addr = %08x", cur_lli->dst_addr);
		CRYPTO_TRACE("dst_len = %08x", cur_lli->dst_len);
		CRYPTO_TRACE("user_def = %08x", cur_lli->user_define);
		CRYPTO_TRACE("dma_ctl = %08x", cur_lli->dma_ctrl);
		CRYPTO_TRACE("next = %08x\n", cur_lli->next_addr);

		if (cur_lli == hw_desc->lli_tail)
			break;
	}
}

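/*
 * Compute how much of @len one descriptor chain can cover: walk @sg,
 * counting entries and bytes, and stop once either RK_DEFAULT_LLI_CNT
 * entries or @len bytes are reached. Returns the byte count to
 * process (capped at @len) and the entry count via @max_nents.
 */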
u64 rk_crypto_hw_desc_maxlen(struct scatterlist *sg, u64 len, u32 *max_nents)
{
	int nents;
	u64 total;

	if (!len)
		return 0;

	for (nents = 0, total = 0; sg; sg = sg_next(sg)) {
		nents++;
		total += sg->length;

		if (nents >= RK_DEFAULT_LLI_CNT || total >= len)
			break;
	}

	*max_nents = nents;
	return total > len ? len : total;
}

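/*
 * Allocate the coherent DMA memory backing a descriptor chain: one
 * standalone descriptor (lli_aad) plus RK_DEFAULT_LLI_CNT payload
 * descriptors. Expected usage is one alloc per context, then
 * rk_crypto_hw_desc_init() per request and rk_crypto_hw_desc_free()
 * on teardown.
 */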
int rk_crypto_hw_desc_alloc(struct device *dev, struct rk_hw_desc *hw_desc)
{
	u32 lli_cnt = RK_DEFAULT_LLI_CNT;
	u32 lli_len = lli_cnt * sizeof(struct crypto_lli_desc);

	if (!dev || !hw_desc)
		return -EINVAL;

	memset(hw_desc, 0x00, sizeof(*hw_desc));

	hw_desc->lli_aad = dma_alloc_coherent(dev, sizeof(struct crypto_lli_desc),
					      &hw_desc->lli_aad_dma, GFP_KERNEL);
	if (!hw_desc->lli_aad)
		return -ENOMEM;

	/* TODO: cma */
	hw_desc->lli_head = dma_alloc_coherent(dev, lli_len, &hw_desc->lli_head_dma, GFP_KERNEL);
	if (!hw_desc->lli_head) {
		dma_free_coherent(dev, sizeof(struct crypto_lli_desc),
				  hw_desc->lli_aad, hw_desc->lli_aad_dma);
		return -ENOMEM;
	}

	hw_desc->lli_tail = hw_desc->lli_head;
	hw_desc->total = lli_cnt;
	hw_desc->dev = dev;

	memset(hw_desc->lli_head, 0x00, lli_len);

	CRYPTO_TRACE("dev = %lx, buffer_len = %u, lli_head = %lx, lli_head_dma = %lx",
		     (unsigned long)hw_desc->dev, lli_len,
		     (unsigned long)hw_desc->lli_head, (unsigned long)hw_desc->lli_head_dma);

	return 0;
}

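/* Free the descriptor memory allocated by rk_crypto_hw_desc_alloc(). */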
void rk_crypto_hw_desc_free(struct rk_hw_desc *hw_desc)
{
	if (!hw_desc || !hw_desc->dev || !hw_desc->lli_head)
		return;

	CRYPTO_TRACE("dev = %lx, buffer_len = %lu, lli_head = %lx, lli_head_dma = %lx",
		     (unsigned long)hw_desc->dev,
		     (unsigned long)hw_desc->total * sizeof(struct crypto_lli_desc),
		     (unsigned long)hw_desc->lli_head, (unsigned long)hw_desc->lli_head_dma);

	dma_free_coherent(hw_desc->dev, sizeof(struct crypto_lli_desc),
			  hw_desc->lli_aad, hw_desc->lli_aad_dma);

	dma_free_coherent(hw_desc->dev, hw_desc->total * sizeof(struct crypto_lli_desc),
			  hw_desc->lli_head, hw_desc->lli_head_dma);

	memset(hw_desc, 0x00, sizeof(*hw_desc));
}

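/*
 * Build a linked-list descriptor chain for @len bytes of already
 * DMA-mapped data. Each scatterlist entry becomes one descriptor;
 * descriptors are linked through their DMA addresses, and the final
 * descriptor's length is trimmed so the chain covers exactly @len
 * bytes. @dst_sg may be NULL for operations without a destination
 * (e.g. hashing).
 */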
int rk_crypto_hw_desc_init(struct rk_hw_desc *hw_desc,
			   struct scatterlist *src_sg,
			   struct scatterlist *dst_sg,
			   u64 len)
{
	struct crypto_lli_desc *cur_lli = NULL;
	struct scatterlist *tmp_src, *tmp_dst;
	dma_addr_t tmp_next_dma;
	int src_nents, dst_nents;
	u32 i, data_cnt = 0;

	if (!hw_desc || !hw_desc->dev || !hw_desc->lli_head)
		return -EINVAL;

	if (!src_sg || len == 0)
		return -EINVAL;

	src_nents = sg_nents_for_len(src_sg, len);
	if (src_nents < 0)
		return src_nents;

	dst_nents = dst_sg ? sg_nents_for_len(dst_sg, len) : src_nents;
	if (dst_nents < 0)
		return dst_nents;

	if (src_nents != dst_nents)
		return -EINVAL;

	CRYPTO_TRACE("src_nents = %d, total = %u, len = %llu", src_nents, hw_desc->total, len);

	if (src_nents > hw_desc->total) {
		pr_err("crypto: nents overflow, %d > %u", src_nents, hw_desc->total);
		return -ENOMEM;
	}

	memset(hw_desc->lli_head, 0x00, src_nents * sizeof(struct crypto_lli_desc));

	cur_lli = hw_desc->lli_head;
	tmp_src = src_sg;
	tmp_dst = dst_sg;
	tmp_next_dma = hw_desc->lli_head_dma + sizeof(*cur_lli);

	if (dst_sg) {
		for (i = 0; i < src_nents - 1; i++, cur_lli++, tmp_next_dma += sizeof(*cur_lli)) {
			cur_lli->src_addr = sg_dma_address(tmp_src);
			cur_lli->src_len = sg_dma_len(tmp_src);
			cur_lli->dst_addr = sg_dma_address(tmp_dst);
			cur_lli->dst_len = sg_dma_len(tmp_dst);
			cur_lli->next_addr = tmp_next_dma;

			data_cnt += sg_dma_len(tmp_src);
			tmp_src = sg_next(tmp_src);
			tmp_dst = sg_next(tmp_dst);
		}
	} else {
		for (i = 0; i < src_nents - 1; i++, cur_lli++, tmp_next_dma += sizeof(*cur_lli)) {
			cur_lli->src_addr = sg_dma_address(tmp_src);
			cur_lli->src_len = sg_dma_len(tmp_src);
			cur_lli->next_addr = tmp_next_dma;

			data_cnt += sg_dma_len(tmp_src);
			tmp_src = sg_next(tmp_src);
		}
	}

	/* for the last lli */
	cur_lli->src_addr = sg_dma_address(tmp_src);
	cur_lli->src_len = len - data_cnt;
	cur_lli->next_addr = 0;

	if (dst_sg) {
		cur_lli->dst_addr = sg_dma_address(tmp_dst);
		cur_lli->dst_len = len - data_cnt;
	}

	hw_desc->lli_tail = cur_lli;

	return 0;
}