// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright 2024 HiSilicon Limited.
 * Kunpeng hardware accelerator HPRE module init.
 */
#include "hpre_main.h"

/* base config */
#define HPRE_COMMON_CNT_CLR_CE		0x301000
#define HPRE_CFG_AXCACHE		0x301010
#define HPRE_RDCHN_INI_CFG		0x301014
#define HPRE_BD_ENDIAN			0x301020
#define HPRE_ECC_BYPASS			0x301024
#define HPRE_POISON_BYPASS		0x30102c
#define HPRE_BD_ARUSR_CFG		0x301030
#define HPRE_BD_AWUSR_CFG		0x301034
#define HPRE_TYPES_ENB			0x301038
#define HPRE_DATA_RUSER_CFG		0x30103c
#define HPRE_DATA_WUSER_CFG		0x301040
#define HPRE_HAC_INT_MASK		0x301400
#define HPRE_RAS_ECC_1BIT_TH		0x30140c
#define HPRE_RAS_CE_ENB			0x301410
#define HPRE_RAS_NFE_ENB		0x301414
#define HPRE_RAS_FE_ENB			0x301418
#define HPRE_HAC_INT_SRC		0x301600
#define HPRE_RDCHN_INI_ST		0x301a00
#define HPRE_OOO_SHUTDOWN_SEL		0x301a3c
#define HPRE_CORE_ENB			0x302004
#define HPRE_CORE_INI_CFG		0x302020
#define HPRE_CORE_INI_STATUS		0x302080
/* clock gate */
#define HPRE_CLKGATE_CTL		0x301a10
#define HPRE_PEH_CFG_AUTO_GATE		0x301a2c
#define HPRE_CLUSTER_DYN_CTL		0x302010
#define HPRE_CORE_SHB_CFG		0x302088
#define HPRE_CORE_GATE_ENABLE		GENMASK_32(31, 30)

#define HPRE_AXCACHE_MASK		0xff
#define HPRE_HAC_INT_DISABLE		0x1ffffff
#define HPRE_RAS_CE_MASK		0x1
#define HPRE_RAS_NFE_MASK		0x1fffffe
#define HPRE_RAS_FE_MASK		0
#define HPRE_BD_LITTLE_ENDIAN		0
#define HPRE_RSA_ENB			BIT(0)
#define HPRE_ECC_ENB			BIT(1)
#define HPRE_BD_ARUSR_MASK		0x2
#define HPRE_BD_AWUSR_MASK		0x102
#define HPRE_DATA_USR_MASK		0x32
#define HPRE_CLUSTER_CORE_MASK		GENMASK_32(9, 0)

static SLIST_HEAD(, acc_device) hpre_list = SLIST_HEAD_INITIALIZER(hpre_list);

struct hisi_qp *hpre_create_qp(uint8_t sq_type)
{
	struct acc_device *hpre_dev = NULL;
	struct acc_device *cur_dev = NULL;
	struct hisi_qm *qm = NULL;
	uint32_t free_qp_num = 0;
	uint32_t max_qp_num = 0;

	/* Find the HPRE device with the most free queue pairs */
	SLIST_FOREACH(cur_dev, &hpre_list, link) {
		qm = &cur_dev->qm;
		if (qm->fun_type == HISI_QM_HW_PF)
			free_qp_num = HISI_QM_PF_Q_NUM - qm->qp_in_used;
		else
			free_qp_num = HISI_QM_VF_Q_NUM - qm->qp_in_used;
		if (free_qp_num > max_qp_num) {
			max_qp_num = free_qp_num;
			hpre_dev = cur_dev;
		}
	}

	if (!hpre_dev) {
		EMSG("No available hpre device");
		return NULL;
	}

	return hisi_qm_create_qp(&hpre_dev->qm, sq_type);
}

enum hisi_drv_status hpre_bin_from_crypto_bin(uint8_t *dst, const uint8_t *src,
					      uint32_t bsize, uint32_t dsize)
{
	if (!src || !dst || !dsize || !bsize) {
		EMSG("Parameter error");
		return HISI_QM_DRVCRYPT_EINVAL;
	}

	if (bsize < dsize) {
		EMSG("dsize is larger than bsize");
		return HISI_QM_DRVCRYPT_EINVAL;
	}

	if (src == dst && bsize == dsize)
		return HISI_QM_DRVCRYPT_NO_ERR;

	/*
	 * Copy the non-zero data to the end of the buffer and pad the
	 * high bits (low addresses) with zeros,
	 * e.g. with dsize = 3 and bsize = 5: 1 2 3 -> 0 0 1 2 3
	 */
	memmove(dst + bsize - dsize, src, dsize);
	memset(dst, 0, bsize - dsize);

	return HISI_QM_DRVCRYPT_NO_ERR;
}

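/*
 * Illustrative use of the conversion above (a sketch, not part of the
 * driver): widening a big-endian crypto bignum into a larger,
 * zero-padded buffer laid out the way the hardware expects. The names
 * "hw_buf", "crypto_buf" and "crypto_size" are hypothetical.
 *
 *	uint8_t hw_buf[32] = { };
 *	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;
 *
 *	ret = hpre_bin_from_crypto_bin(hw_buf, crypto_buf, sizeof(hw_buf),
 *				       crypto_size);
 *	if (ret)
 *		return TEE_ERROR_BAD_PARAMETERS;
 */
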
enum hisi_drv_status hpre_bin_to_crypto_bin(uint8_t *dst, const uint8_t *src,
					    uint32_t bsize, uint32_t dsize)
{
	if (!dst || !src || !bsize || !dsize) {
		EMSG("Parameter error");
		return HISI_QM_DRVCRYPT_EINVAL;
	}

	if (bsize < dsize) {
		EMSG("dsize is larger than bsize");
		return HISI_QM_DRVCRYPT_EINVAL;
	}

	if (src == dst && bsize == dsize)
		return HISI_QM_DRVCRYPT_NO_ERR;

	/*
	 * Copy the non-zero data to the start of the buffer and pad the
	 * low bits (high addresses) with zeros,
	 * e.g. with dsize = 3 and bsize = 5: 0 0 1 2 3 -> 1 2 3 0 0
	 */
	memmove(dst, src + bsize - dsize, dsize);
	memset(dst + dsize, 0, bsize - dsize);

	return HISI_QM_DRVCRYPT_NO_ERR;
}

static enum hisi_drv_status hpre_set_cluster(struct hisi_qm *qm)
{
	uint32_t val = 0;

	io_write32(qm->io_base + HPRE_CORE_ENB, HPRE_CLUSTER_CORE_MASK);
	io_write32(qm->io_base + HPRE_CORE_INI_CFG, 0x1);

	if (IO_READ32_POLL_TIMEOUT(qm->io_base + HPRE_CORE_INI_STATUS, val,
				   (val & HPRE_CLUSTER_CORE_MASK) ==
				   HPRE_CLUSTER_CORE_MASK,
				   POLL_PERIOD, POLL_TIMEOUT))
		return HISI_QM_DRVCRYPT_EBUSY;

	return HISI_QM_DRVCRYPT_NO_ERR;
}

static void hpre_disable_clock_gate(struct hisi_qm *qm)
{
	io_write32(qm->io_base + HPRE_CLKGATE_CTL, 0x0);
	io_write32(qm->io_base + HPRE_PEH_CFG_AUTO_GATE, 0x0);
	io_write32(qm->io_base + HPRE_CLUSTER_DYN_CTL, 0x0);
	io_clrbits32(qm->io_base + HPRE_CORE_SHB_CFG, HPRE_CORE_GATE_ENABLE);
}

static void hpre_enable_clock_gate(struct hisi_qm *qm)
{
	io_write32(qm->io_base + HPRE_CLKGATE_CTL, 0x1);
	io_write32(qm->io_base + HPRE_PEH_CFG_AUTO_GATE, 0x1);
	io_write32(qm->io_base + HPRE_CLUSTER_DYN_CTL, 0x1);
	io_setbits32(qm->io_base + HPRE_CORE_SHB_CFG, HPRE_CORE_GATE_ENABLE);
}

static TEE_Result hpre_engine_init(struct acc_device *hpre_dev)
{
	struct hisi_qm *qm = &hpre_dev->qm;
	uint32_t val = 0;
	int32_t ret = 0;

	if (qm->fun_type == HISI_QM_HW_VF)
		return TEE_SUCCESS;

	hpre_disable_clock_gate(qm);
	hisi_qm_dev_init(qm);

	io_write32(qm->io_base + HPRE_CFG_AXCACHE, HPRE_AXCACHE_MASK);
	io_write32(qm->io_base + HPRE_BD_ENDIAN, HPRE_BD_LITTLE_ENDIAN);
	io_write32(qm->io_base + HPRE_RAS_CE_ENB, HPRE_RAS_CE_MASK);
	io_write32(qm->io_base + HPRE_RAS_NFE_ENB, HPRE_RAS_NFE_MASK);
	io_write32(qm->io_base + HPRE_RAS_FE_ENB, HPRE_RAS_FE_MASK);
	io_write32(qm->io_base + HPRE_HAC_INT_MASK, HPRE_HAC_INT_DISABLE);
	io_write32(qm->io_base + HPRE_POISON_BYPASS, 0x0);
	io_write32(qm->io_base + HPRE_COMMON_CNT_CLR_CE, 0x0);
	io_write32(qm->io_base + HPRE_ECC_BYPASS, 0x0);
	/* cmd_type is controlled by hac subctrl */
	io_write32(qm->io_base + HPRE_BD_ARUSR_CFG, HPRE_BD_ARUSR_MASK);
	io_write32(qm->io_base + HPRE_BD_AWUSR_CFG, HPRE_BD_AWUSR_MASK);
	io_write32(qm->io_base + HPRE_DATA_RUSER_CFG, HPRE_DATA_USR_MASK);
	io_write32(qm->io_base + HPRE_DATA_WUSER_CFG, HPRE_DATA_USR_MASK);
	io_write32(qm->io_base + HPRE_TYPES_ENB, HPRE_RSA_ENB | HPRE_ECC_ENB);
	io_write32(qm->io_base + HPRE_RDCHN_INI_CFG, 0x1);
	ret = IO_READ32_POLL_TIMEOUT(qm->io_base + HPRE_RDCHN_INI_ST, val,
				     val & 0x1, POLL_PERIOD, POLL_TIMEOUT);
	if (ret) {
		EMSG("Fail to init the read channel");
		return TEE_ERROR_BUSY;
	}

	ret = hpre_set_cluster(qm);
	if (ret) {
		EMSG("Fail to init hpre cluster cores");
		return TEE_ERROR_BUSY;
	}

	hpre_enable_clock_gate(qm);

	return TEE_SUCCESS;
}

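/*
 * Note on the polling pattern used above: IO_READ32_POLL_TIMEOUT()
 * re-reads the register into 'val' every POLL_PERIOD microseconds until
 * the condition holds or POLL_TIMEOUT elapses, and evaluates to 0 on
 * success. A minimal open-coded sketch of the read-channel wait
 * (assuming the timeout_init_us()/timeout_elapsed()/udelay() helpers
 * from <kernel/delay.h>):
 *
 *	uint64_t tref = timeout_init_us(POLL_TIMEOUT);
 *
 *	while (!(io_read32(qm->io_base + HPRE_RDCHN_INI_ST) & 0x1)) {
 *		if (timeout_elapsed(tref))
 *			return TEE_ERROR_BUSY;
 *		udelay(POLL_PERIOD);
 *	}
 */
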
static enum hisi_drv_status hpre_dev_status_check(struct hisi_qm *qm)
{
	uint32_t val = 0;

	val = io_read32(qm->io_base + HPRE_HAC_INT_SRC);
	if (val & HPRE_RAS_NFE_MASK) {
		EMSG("HPRE NFE RAS happened, need to reset");
		return HISI_QM_DRVCRYPT_HW_EACCESS;
	}

	val = io_read32(qm->io_base + HISI_QM_ABNML_INT_SRC);
	if (val) {
		if (val & HISI_QM_HPRE_NFE_INT_MASK)
			EMSG("QM NFE RAS happened, need to reset");

		if (val & HISI_QM_INVALID_DB) {
			EMSG("QM invalid db happened, please check");
			io_write32(qm->io_base + HISI_QM_ABNML_INT_SRC,
				   HISI_QM_INVALID_DB);
		}

		return HISI_QM_DRVCRYPT_HW_EACCESS;
	}

	return HISI_QM_DRVCRYPT_NO_ERR;
}

static enum hisi_drv_status hpre_qm_init(struct acc_device *hpre_dev)
{
	struct hisi_qm *qm = &hpre_dev->qm;

	if (cpu_mmu_enabled()) {
		qm->io_base = (uintptr_t)phys_to_virt_io(hpre_dev->io_base,
							 hpre_dev->io_size);
		if (!qm->io_base) {
			EMSG("Fail to get qm io_base");
			return HISI_QM_DRVCRYPT_EFAULT;
		}
	} else {
		qm->io_base = hpre_dev->io_base;
	}

	qm->vfs_num = hpre_dev->vfs_num;
	qm->fun_type = hpre_dev->fun_type;
	qm->sqe_size = HPRE_SQE_SIZE;
	qm->sqe_log2_size = HPRE_SQE_LOG2_SIZE;
	if (qm->fun_type == HISI_QM_HW_PF) {
		hisi_qm_get_version(qm);
		DMSG("HPRE hardware version is 0x%"PRIx32, qm->version);
		qm->qp_base = HISI_QM_PF_Q_BASE;
		qm->qp_num = HISI_QM_PF_Q_NUM;
		qm->dev_status_check = hpre_dev_status_check;
	}

	return hisi_qm_init(qm);
}

static struct acc_device *hpre_pre_init(void)
{
	struct acc_device *hpre_dev = NULL;

	hpre_dev = calloc(1, sizeof(*hpre_dev));
	if (!hpre_dev) {
		EMSG("Fail to alloc hpre_dev");
		return NULL;
	}

	hpre_dev->io_base = HPRE_BAR_BASE;
	hpre_dev->io_size = HPRE_BAR_SIZE;
	hpre_dev->fun_type = HISI_QM_HW_PF;
	SLIST_INSERT_HEAD(&hpre_list, hpre_dev, link);

	return hpre_dev;
}

static TEE_Result hpre_probe(void)
{
	TEE_Result ret = TEE_ERROR_GENERIC;
	struct acc_device *hpre_dev = NULL;
	struct hisi_qm *qm = NULL;

	DMSG("HPRE driver init start");
	hpre_dev = hpre_pre_init();
	if (!hpre_dev)
		return TEE_ERROR_OUT_OF_MEMORY;

	qm = &hpre_dev->qm;
	if (hpre_qm_init(hpre_dev)) {
		EMSG("Fail to init hpre qm");
		goto err_with_pre_init;
	}

	ret = hpre_engine_init(hpre_dev);
	if (ret) {
		EMSG("Fail to init engine");
		goto err_with_qm_init;
	}

	if (hisi_qm_start(qm)) {
		EMSG("Fail to start qm");
		ret = TEE_ERROR_BAD_STATE;
		goto err_with_qm_init;
	}

	DMSG("HPRE driver init done");
	return TEE_SUCCESS;

err_with_qm_init:
	hisi_qm_uninit(qm);
err_with_pre_init:
	SLIST_REMOVE_HEAD(&hpre_list, link);
	free(hpre_dev);

	return ret;
}

driver_init(hpre_probe);
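
/*
 * Illustrative consumer-side use (a sketch under assumptions, not part
 * of this file): after hpre_probe() has registered the device, an
 * algorithm driver such as RSA or ECC would typically grab a queue pair
 * from the least-loaded HPRE device at init time. "HPRE_SQ_TYPE" is a
 * hypothetical placeholder for the real send-queue type value.
 *
 *	struct hisi_qp *qp = hpre_create_qp(HPRE_SQ_TYPE);
 *
 *	if (!qp)
 *		return TEE_ERROR_BUSY;
 */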