/* SPDX-License-Identifier: GPL-2.0 */
/* Copyright (c) 2019 HiSilicon Limited. */

#ifndef __HISI_SEC_V2_H
#define __HISI_SEC_V2_H

#include "../qm.h"
#include "sec_crypto.h"

/* Algorithm resource per hardware SEC queue */
struct sec_alg_res {
	u8 *pbuf;
	dma_addr_t pbuf_dma;
	u8 *c_ivin;
	dma_addr_t c_ivin_dma;
	u8 *out_mac;
	dma_addr_t out_mac_dma;
};

/* Cipher request private to the SEC driver */
struct sec_cipher_req {
	struct hisi_acc_hw_sgl *c_in;
	dma_addr_t c_in_dma;
	struct hisi_acc_hw_sgl *c_out;
	dma_addr_t c_out_dma;
	u8 *c_ivin;
	dma_addr_t c_ivin_dma;
	struct skcipher_request *sk_req;
	u32 c_len;
	bool encrypt;
};

struct sec_aead_req {
	u8 *out_mac;
	dma_addr_t out_mac_dma;
	struct aead_request *aead_req;
};

/* SEC request of crypto */
struct sec_req {
	struct sec_sqe sec_sqe;
	struct sec_ctx *ctx;
	struct sec_qp_ctx *qp_ctx;

	struct sec_cipher_req c_req;
	struct sec_aead_req aead_req;
	struct list_head backlog_head;

	int err_type;
	int req_id;
	u32 flag;

	/* Status of the SEC request */
	bool fake_busy;
	bool use_pbuf;
};

/**
 * struct sec_req_op - Operations for SEC request
 * @buf_map: DMA map the SGL buffers of the request
 * @buf_unmap: DMA unmap the SGL buffers of the request
 * @do_transfer: Transfer data, such as the IV, for the request
 * @bd_fill: Fill the SEC queue BD
 * @bd_send: Send the SEC BD into the hardware queue
 * @callback: Completion callback for the request
 * @process: Main processing logic of skcipher
 */
struct sec_req_op {
	int (*buf_map)(struct sec_ctx *ctx, struct sec_req *req);
	void (*buf_unmap)(struct sec_ctx *ctx, struct sec_req *req);
	void (*do_transfer)(struct sec_ctx *ctx, struct sec_req *req);
	int (*bd_fill)(struct sec_ctx *ctx, struct sec_req *req);
	int (*bd_send)(struct sec_ctx *ctx, struct sec_req *req);
	void (*callback)(struct sec_ctx *ctx, struct sec_req *req, int err);
	int (*process)(struct sec_ctx *ctx, struct sec_req *req);
};

/* SEC auth context */
struct sec_auth_ctx {
	dma_addr_t a_key_dma;
	u8 *a_key;
	u8 a_key_len;
	u8 mac_len;
	u8 a_alg;
	struct crypto_shash *hash_tfm;
};

/* SEC cipher context, which holds the cipher's related data */
struct sec_cipher_ctx {
	u8 *c_key;
	dma_addr_t c_key_dma;
	sector_t iv_offset;
	u32 c_gran_size;
	u32 ivsize;
	u8 c_mode;
	u8 c_alg;
	u8 c_key_len;
};

/* SEC queue context, which defines the queue's related data */
struct sec_qp_ctx {
	struct hisi_qp *qp;
	struct sec_req *req_list[QM_Q_DEPTH];
	struct idr req_idr;
	struct sec_alg_res res[QM_Q_DEPTH];
	struct sec_ctx *ctx;
	spinlock_t req_lock;
	struct list_head backlog;
	struct hisi_acc_sgl_pool *c_in_pool;
	struct hisi_acc_sgl_pool *c_out_pool;
	atomic_t pending_reqs;
};

enum sec_alg_type {
	SEC_SKCIPHER,
	SEC_AEAD
};

/* SEC crypto TFM context, which defines the queue, cipher and other related data */
struct sec_ctx {
	struct sec_qp_ctx *qp_ctx;
	struct sec_dev *sec;
	const struct sec_req_op *req_op;
	struct hisi_qp **qps;

	/* Half of the queues are used for encipher, and half for decipher */
	u32 hlf_q_num;

	/* Fake-busy threshold; when it is reached, -EBUSY is returned to the user */
	u32 fake_req_limit;

	/* Current cyclic index to select a queue for encipher */
	atomic_t enc_qcyclic;

	/* Current cyclic index to select a queue for decipher */
	atomic_t dec_qcyclic;

	enum sec_alg_type alg_type;
	bool pbuf_supported;
	struct sec_cipher_ctx c_ctx;
	struct sec_auth_ctx a_ctx;
	struct device *dev;
};

enum sec_endian {
	SEC_LE = 0,
	SEC_32BE,
	SEC_64BE
};

enum sec_debug_file_index {
	SEC_CURRENT_QM,
	SEC_CLEAR_ENABLE,
	SEC_DEBUG_FILE_NUM,
};

struct sec_debug_file {
	enum sec_debug_file_index index;
	spinlock_t lock;
	struct hisi_qm *qm;
};

struct sec_dfx {
	atomic64_t send_cnt;
	atomic64_t recv_cnt;
	atomic64_t send_busy_cnt;
	atomic64_t recv_busy_cnt;
	atomic64_t err_bd_cnt;
	atomic64_t invalid_req_cnt;
	atomic64_t done_flag_cnt;
};

struct sec_debug {
	struct sec_dfx dfx;
	struct sec_debug_file files[SEC_DEBUG_FILE_NUM];
};

struct sec_dev {
	struct hisi_qm qm;
	struct sec_debug debug;
	u32 ctx_q_num;
	bool iommu_used;
	unsigned long status;
};

void sec_destroy_qps(struct hisi_qp **qps, int qp_num);
struct hisi_qp **sec_create_qps(void);
int sec_register_to_crypto(void);
void sec_unregister_from_crypto(void);
#endif
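
/*
 * Illustrative sketch, not part of the driver API: roughly how a request is
 * expected to travel through the sec_req_op callbacks declared above. The
 * helper name sec_process_sketch() is hypothetical and the block is compiled
 * out.
 */
#if 0
static int sec_process_sketch(struct sec_ctx *ctx, struct sec_req *req)
{
	int ret;

	/* DMA map the source/destination SGL buffers of the request */
	ret = ctx->req_op->buf_map(ctx, req);
	if (ret)
		return ret;

	/* Transfer per-request data, e.g. copy the IV into the DMA buffer */
	if (ctx->req_op->do_transfer)
		ctx->req_op->do_transfer(ctx, req);

	/* Build the BD describing the job, then push it to the hardware queue */
	ret = ctx->req_op->bd_fill(ctx, req);
	if (!ret)
		ret = ctx->req_op->bd_send(ctx, req);

	if (ret) {
		ctx->req_op->buf_unmap(ctx, req);
		return ret;
	}

	/* On completion, the driver is expected to invoke ->callback(ctx, req, err) */
	return -EINPROGRESS;
}
#endif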