Lines matching full:blocks (identifier search; each hit shows its file line number, the matched source line, and the enclosing function)
29 int rounds, int blocks);
31 int rounds, int blocks);
34 int rounds, int blocks, u8 iv[]);
37 int rounds, int blocks, u8 iv[], u8 final[]);
40 int rounds, int blocks, u8 iv[]);
42 int rounds, int blocks, u8 iv[]);
46 int rounds, int blocks);
48 int rounds, int blocks, u8 iv[]);
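The hits at lines 29-48 are continuation lines of the asmlinkage prototypes for the assembler routines: every helper is handed a round count plus a whole number of 16-byte blocks, and the CBC/CTR/XTS variants additionally take an IV (CTR also takes a buffer for one spare keystream block). A hedged reconstruction of the declarations, assuming this listing comes from the arm64 bit-sliced AES (aes-neonbs) glue code; the aesbs_*/neon_aes_* names are inferred from that file and are not visible in the matches themselves:

    /* bit-sliced helpers: assumed to process 8 blocks per pass */
    asmlinkage void aesbs_ecb_encrypt(u8 out[], u8 const in[], u8 const rk[],
                                      int rounds, int blocks);     /* line 29 */
    asmlinkage void aesbs_ecb_decrypt(u8 out[], u8 const in[], u8 const rk[],
                                      int rounds, int blocks);     /* line 31 */
    asmlinkage void aesbs_cbc_decrypt(u8 out[], u8 const in[], u8 const rk[],
                                      int rounds, int blocks, u8 iv[]);
    asmlinkage void aesbs_ctr_encrypt(u8 out[], u8 const in[], u8 const rk[],
                                      int rounds, int blocks, u8 iv[], u8 final[]);
    asmlinkage void aesbs_xts_encrypt(u8 out[], u8 const in[], u8 const rk[],
                                      int rounds, int blocks, u8 iv[]);
    asmlinkage void aesbs_xts_decrypt(u8 out[], u8 const in[], u8 const rk[],
                                      int rounds, int blocks, u8 iv[]);

    /* plain (non-bitsliced) NEON fallbacks, matching lines 46 and 48 */
    asmlinkage void neon_aes_ecb_encrypt(u8 out[], u8 const in[], u32 const rk[],
                                         int rounds, int blocks);
    asmlinkage void neon_aes_cbc_encrypt(u8 out[], u8 const in[], u32 const rk[],
                                         int rounds, int blocks, u8 iv[]);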
94 int rounds, int blocks)) in __ecb_crypt() argument
104 unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE; in __ecb_crypt() local
107 blocks = round_down(blocks, in __ecb_crypt()
112 ctx->rounds, blocks); in __ecb_crypt()
115 walk.nbytes - blocks * AES_BLOCK_SIZE); in __ecb_crypt()
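Lines 94-115 are the shared ECB walk loop. The pattern the matches show: convert the current chunk to whole blocks and, while more data is still to come, round the count down to a multiple of the walk stride (the bit-sliced code wants full batches of 8 blocks); whatever does not fit is handed back via skcipher_walk_done(). A sketch of how the loop plausibly reads; names not present in the matches (walk, fn, ctx) follow the usual skcipher glue conventions and are assumed:

    while (walk.nbytes >= AES_BLOCK_SIZE) {
        unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;

        if (walk.nbytes < walk.total)   /* not the final chunk yet */
            blocks = round_down(blocks,
                                walk.stride / AES_BLOCK_SIZE);

        kernel_neon_begin();
        fn(walk.dst.virt.addr, walk.src.virt.addr, ctx->rk,
           ctx->rounds, blocks);        /* line 112 */
        kernel_neon_end();

        /* return the unprocessed tail to the walk (line 115) */
        err = skcipher_walk_done(&walk,
                                 walk.nbytes - blocks * AES_BLOCK_SIZE);
    }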
164 unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE; in cbc_encrypt() local
169 ctx->enc, ctx->key.rounds, blocks, in cbc_encrypt()
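cbc_encrypt() (lines 164-169) has no round_down step: CBC encryption chains each ciphertext block into the next, so the 8-way bit-sliced routine cannot help, and the glue calls the plain NEON helper from line 48 for each chunk instead. A hedged sketch under the same naming assumptions:

    while (walk.nbytes >= AES_BLOCK_SIZE) {
        unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;

        /* serial mode: fall through to the non-bitsliced helper */
        kernel_neon_begin();
        neon_aes_cbc_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
                             ctx->enc, ctx->key.rounds, blocks,
                             walk.iv);  /* line 169 */
        kernel_neon_end();
        err = skcipher_walk_done(&walk, walk.nbytes % AES_BLOCK_SIZE);
    }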
187 unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE; in cbc_decrypt() local
190 blocks = round_down(blocks, in cbc_decrypt()
195 ctx->key.rk, ctx->key.rounds, blocks, in cbc_decrypt()
199 walk.nbytes - blocks * AES_BLOCK_SIZE); in cbc_decrypt()
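cbc_decrypt() (lines 187-199), by contrast, does batch: all ciphertext blocks are available up front, so decryption parallelises and the bit-sliced routine is used with the same stride rounding as ECB. Sketch, same assumptions as above:

    while (walk.nbytes >= AES_BLOCK_SIZE) {
        unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;

        if (walk.nbytes < walk.total)
            blocks = round_down(blocks,
                                walk.stride / AES_BLOCK_SIZE);  /* line 190 */

        kernel_neon_begin();
        aesbs_cbc_decrypt(walk.dst.virt.addr, walk.src.virt.addr,
                          ctx->key.rk, ctx->key.rounds, blocks,
                          walk.iv);
        kernel_neon_end();
        err = skcipher_walk_done(&walk,
                                 walk.nbytes - blocks * AES_BLOCK_SIZE);
    }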
216 unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE; in ctr_encrypt() local
220 blocks = round_down(blocks, in ctr_encrypt()
227 ctx->rk, ctx->rounds, blocks, walk.iv, final); in ctr_encrypt()
231 u8 *dst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE; in ctr_encrypt()
232 u8 *src = walk.src.virt.addr + blocks * AES_BLOCK_SIZE; in ctr_encrypt()
241 walk.nbytes - blocks * AES_BLOCK_SIZE); in ctr_encrypt()
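ctr_encrypt() (lines 216-241) adds tail handling on top of the same batching: when the request length is not a multiple of AES_BLOCK_SIZE, the assembler is asked (via the final argument from line 37) to emit one extra keystream block, which the glue then XORs over the trailing partial block. A sketch of that tail path; buf is an assumed stack buffer, and crypto_xor_cpy() is the standard crypto-API XOR-and-copy helper, assumed rather than shown in the matches:

    u8 buf[AES_BLOCK_SIZE];
    u8 *final = (walk.total % AES_BLOCK_SIZE) ? buf : NULL;

    if (walk.nbytes < walk.total) {     /* more chunks follow */
        blocks = round_down(blocks, walk.stride / AES_BLOCK_SIZE);
        final = NULL;                   /* the tail comes later */
    }

    kernel_neon_begin();
    aesbs_ctr_encrypt(walk.dst.virt.addr, walk.src.virt.addr,
                      ctx->rk, ctx->rounds, blocks, walk.iv, final);
    kernel_neon_end();

    if (final) {
        /* lines 231-232: step past the whole blocks just written */
        u8 *dst = walk.dst.virt.addr + blocks * AES_BLOCK_SIZE;
        u8 *src = walk.src.virt.addr + blocks * AES_BLOCK_SIZE;

        /* XOR the spare keystream block over the partial tail */
        crypto_xor_cpy(dst, src, final, walk.total % AES_BLOCK_SIZE);
        err = skcipher_walk_done(&walk, 0);     /* all consumed */
        break;
    }
    err = skcipher_walk_done(&walk,
                             walk.nbytes - blocks * AES_BLOCK_SIZE);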
273 int rounds, int blocks, u8 iv[])) in __xts_crypt() argument
311 unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE; in __xts_crypt() local
314 blocks = round_down(blocks, in __xts_crypt()
322 if (likely(blocks > 6)) { /* plain NEON is faster otherwise */ in __xts_crypt()
329 fn(out, in, ctx->key.rk, ctx->key.rounds, blocks, in __xts_crypt()
332 out += blocks * AES_BLOCK_SIZE; in __xts_crypt()
333 in += blocks * AES_BLOCK_SIZE; in __xts_crypt()
334 nbytes -= blocks * AES_BLOCK_SIZE; in __xts_crypt()
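Finally, __xts_crypt() (lines 273-334) takes the bit-sliced encrypt or decrypt routine as a function pointer (line 273) and adds a size heuristic: the bit-sliced code's fixed setup cost only pays off for batches larger than six blocks, hence the comment on line 322 that plain NEON is faster otherwise. The out/in/nbytes bookkeeping on lines 332-334 advances past the processed batch so a fallback path can finish the remainder. A sketch of the inner loop, with the short-input and tweak-setup details omitted:

    unsigned int blocks = walk.nbytes / AES_BLOCK_SIZE;

    if (walk.nbytes < walk.total)
        blocks = round_down(blocks, walk.stride / AES_BLOCK_SIZE);

    kernel_neon_begin();
    if (likely(blocks > 6)) {   /* plain NEON is faster otherwise */
        fn(out, in, ctx->key.rk, ctx->key.rounds, blocks,
           walk.iv);            /* line 329 */

        out += blocks * AES_BLOCK_SIZE;     /* lines 332-334 */
        in += blocks * AES_BLOCK_SIZE;
        nbytes -= blocks * AES_BLOCK_SIZE;
    }
    /* batches of <= 6 blocks (and any ciphertext-stealing tail)
     * would be handled by the plain NEON helpers; omitted here. */
    kernel_neon_end();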