// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright 2024 HiSilicon Limited.
 * Kunpeng hardware accelerator HPRE module init.
 */

#include <hpre_main.h>
#include <initcall.h>

/* base config */
#define HPRE_COMMON_CNT_CLR_CE 0x301000
#define HPRE_CFG_AXCACHE 0x301010
#define HPRE_RDCHN_INI_CFG 0x301014
#define HPRE_BD_ENDIAN 0x301020
#define HPRE_ECC_BYPASS 0x301024
#define HPRE_POISON_BYPASS 0x30102c
#define HPRE_BD_ARUSR_CFG 0x301030
#define HPRE_BD_AWUSR_CFG 0x301034
#define HPRE_TYPES_ENB 0x301038
#define HPRE_DATA_RUSER_CFG 0x30103c
#define HPRE_DATA_WUSER_CFG 0x301040
#define HPRE_HAC_INT_MASK 0x301400
#define HPRE_RAS_ECC_1BIT_TH 0x30140c
#define HPRE_RAS_CE_ENB 0x301410
#define HPRE_RAS_NFE_ENB 0x301414
#define HPRE_RAS_FE_ENB 0x301418
#define HPRE_HAC_INT_SRC 0x301600
#define HPRE_RDCHN_INI_ST 0x301a00
#define HPRE_OOO_SHUTDOWN_SEL 0x301a3c
#define HPRE_CORE_ENB 0x302004
#define HPRE_CORE_INI_CFG 0x302020
#define HPRE_CORE_INI_STATUS 0x302080
/* clock gate */
#define HPRE_CLKGATE_CTL 0x301a10
#define HPRE_PEH_CFG_AUTO_GATE 0x301a2c
#define HPRE_CLUSTER_DYN_CTL 0x302010
#define HPRE_CORE_SHB_CFG 0x302088
#define HPRE_CORE_GATE_ENABLE GENMASK_32(31, 30)

#define HPRE_AXCACHE_MASK 0xff
#define HPRE_HAC_INT_DISABLE 0x1ffffff
#define HPRE_RAS_CE_MASK 0x1
#define HPRE_RAS_NFE_MASK 0x1fffffe
#define HPRE_RAS_FE_MASK 0
#define HPRE_BD_LITTLE_ENDIAN 0
#define HPRE_RSA_ENB BIT(0)
#define HPRE_ECC_ENB BIT(1)
#define HPRE_BD_ARUSR_MASK 0x2
#define HPRE_BD_AWUSR_MASK 0x102
#define HPRE_DATA_USR_MASK 0x32
#define HPRE_CLUSTER_CORE_MASK GENMASK_32(9, 0)

static SLIST_HEAD(, acc_device) hpre_list = SLIST_HEAD_INITIALIZER(hpre_list);

struct hisi_qp *hpre_create_qp(uint8_t sq_type)
{
	struct acc_device *hpre_dev = NULL;
	struct acc_device *cur_dev = NULL;
	struct hisi_qm *qm = NULL;
	uint32_t free_qp_num = 0;
	uint32_t max_qp_num = 0;

	/* Find the HPRE device with the most free queue pairs */
	SLIST_FOREACH(cur_dev, &hpre_list, link) {
		qm = &cur_dev->qm;
		if (qm->fun_type == HISI_QM_HW_PF)
			free_qp_num = HISI_QM_PF_Q_NUM - qm->qp_in_used;
		else
			free_qp_num = HISI_QM_VF_Q_NUM - qm->qp_in_used;
		if (free_qp_num > max_qp_num) {
			max_qp_num = free_qp_num;
			hpre_dev = cur_dev;
		}
	}

	if (!hpre_dev) {
		EMSG("No available hpre device");
		return NULL;
	}

	return hisi_qm_create_qp(&hpre_dev->qm, sq_type);
}

enum hisi_drv_status hpre_bin_from_crypto_bin(uint8_t *dst, const uint8_t *src,
					      uint32_t bsize, uint32_t dsize)
{
	if (!src || !dst || !dsize || !bsize) {
		EMSG("parameter error");
		return HISI_QM_DRVCRYPT_EINVAL;
	}

	if (bsize < dsize) {
		EMSG("dsize is too long");
		return HISI_QM_DRVCRYPT_EINVAL;
	}

	if (src == dst && bsize == dsize)
		return HISI_QM_DRVCRYPT_NO_ERR;

	/*
	 * Copy the non-zero data and pad the high bits with zeros
	 * (e.g. 1 2 3 0 0 -> 0 0 1 2 3)
	 */
	memmove(dst + bsize - dsize, src, dsize);
	memset(dst, 0, bsize - dsize);

	return HISI_QM_DRVCRYPT_NO_ERR;
}

enum hisi_drv_status hpre_bin_to_crypto_bin(uint8_t *dst, const uint8_t *src,
					    uint32_t bsize, uint32_t dsize)
{
	if (!dst || !src || !bsize || !dsize) {
		EMSG("parameter error");
		return HISI_QM_DRVCRYPT_EINVAL;
	}

	if (bsize < dsize) {
		EMSG("dsize is too long");
		return HISI_QM_DRVCRYPT_EINVAL;
	}

	if (src == dst && bsize == dsize)
		return HISI_QM_DRVCRYPT_NO_ERR;

	/*
	 * Copy the non-zero data and pad the low bits with zeros
	 * (e.g. 0 0 1 2 3 -> 1 2 3 0 0)
	 */
	memmove(dst, src + bsize - dsize, dsize);
	memset(dst + dsize, 0, bsize - dsize);

	return HISI_QM_DRVCRYPT_NO_ERR;
}
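/*
 * Usage sketch (illustrative only, not part of the driver): the two
 * helpers above convert between the crypto framework layout, where the
 * big number sits at the start of the buffer, and the fixed-width HPRE
 * hardware layout, where it is right-aligned behind leading zeros.
 * Assuming a hypothetical 64-byte hardware field and a 20-byte operand
 * in key_data, a caller might write:
 *
 *	uint8_t hw_buf[64] = { };
 *
 *	if (hpre_bin_from_crypto_bin(hw_buf, key_data, sizeof(hw_buf),
 *				     20) != HISI_QM_DRVCRYPT_NO_ERR)
 *		return TEE_ERROR_BAD_PARAMETERS;
 *
 * hpre_bin_to_crypto_bin() performs the inverse move when reading
 * results back from the hardware.
 */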
static enum hisi_drv_status hpre_set_cluster(struct hisi_qm *qm)
{
	uint32_t val = 0;

	io_write32(qm->io_base + HPRE_CORE_ENB, HPRE_CLUSTER_CORE_MASK);
	io_write32(qm->io_base + HPRE_CORE_INI_CFG, 0x1);

	if (IO_READ32_POLL_TIMEOUT(qm->io_base + HPRE_CORE_INI_STATUS, val,
				   (val & HPRE_CLUSTER_CORE_MASK) ==
				   HPRE_CLUSTER_CORE_MASK, POLL_PERIOD,
				   POLL_TIMEOUT))
		return HISI_QM_DRVCRYPT_EBUSY;

	return HISI_QM_DRVCRYPT_NO_ERR;
}

static void hpre_disable_clock_gate(struct hisi_qm *qm)
{
	io_write32(qm->io_base + HPRE_CLKGATE_CTL, 0x0);
	io_write32(qm->io_base + HPRE_PEH_CFG_AUTO_GATE, 0x0);
	io_write32(qm->io_base + HPRE_CLUSTER_DYN_CTL, 0x0);
	io_clrbits32(qm->io_base + HPRE_CORE_SHB_CFG, HPRE_CORE_GATE_ENABLE);
}

static void hpre_enable_clock_gate(struct hisi_qm *qm)
{
	io_write32(qm->io_base + HPRE_CLKGATE_CTL, 0x1);
	io_write32(qm->io_base + HPRE_PEH_CFG_AUTO_GATE, 0x1);
	io_write32(qm->io_base + HPRE_CLUSTER_DYN_CTL, 0x1);
	io_setbits32(qm->io_base + HPRE_CORE_SHB_CFG, HPRE_CORE_GATE_ENABLE);
}

static TEE_Result hpre_engine_init(struct acc_device *hpre_dev)
{
	struct hisi_qm *qm = &hpre_dev->qm;
	uint32_t val = 0;
	int32_t ret = 0;

	if (qm->fun_type == HISI_QM_HW_VF)
		return TEE_SUCCESS;

	hpre_disable_clock_gate(qm);
	hisi_qm_dev_init(qm);

	io_write32(qm->io_base + HPRE_CFG_AXCACHE, HPRE_AXCACHE_MASK);
	io_write32(qm->io_base + HPRE_BD_ENDIAN, HPRE_BD_LITTLE_ENDIAN);
	io_write32(qm->io_base + HPRE_RAS_CE_ENB, HPRE_RAS_CE_MASK);
	io_write32(qm->io_base + HPRE_RAS_NFE_ENB, HPRE_RAS_NFE_MASK);
	io_write32(qm->io_base + HPRE_RAS_FE_ENB, HPRE_RAS_FE_MASK);
	io_write32(qm->io_base + HPRE_HAC_INT_MASK, HPRE_HAC_INT_DISABLE);
	io_write32(qm->io_base + HPRE_POISON_BYPASS, 0x0);
	io_write32(qm->io_base + HPRE_COMMON_CNT_CLR_CE, 0x0);
	io_write32(qm->io_base + HPRE_ECC_BYPASS, 0x0);
	/* cmd_type is controlled by hac subctrl */
	io_write32(qm->io_base + HPRE_BD_ARUSR_CFG, HPRE_BD_ARUSR_MASK);
	io_write32(qm->io_base + HPRE_BD_AWUSR_CFG, HPRE_BD_AWUSR_MASK);
	io_write32(qm->io_base + HPRE_DATA_RUSER_CFG, HPRE_DATA_USR_MASK);
	io_write32(qm->io_base + HPRE_DATA_WUSER_CFG, HPRE_DATA_USR_MASK);
	io_write32(qm->io_base + HPRE_TYPES_ENB, HPRE_RSA_ENB | HPRE_ECC_ENB);

	io_write32(qm->io_base + HPRE_RDCHN_INI_CFG, 0x1);
	ret = IO_READ32_POLL_TIMEOUT(qm->io_base + HPRE_RDCHN_INI_ST, val,
				     val & 0x1, POLL_PERIOD, POLL_TIMEOUT);
	if (ret) {
		EMSG("Fail to init rd channel");
		return TEE_ERROR_BUSY;
	}

	ret = hpre_set_cluster(qm);
	if (ret) {
		EMSG("Fail to init hpre cluster cores");
		return TEE_ERROR_BUSY;
	}

	hpre_enable_clock_gate(qm);

	return TEE_SUCCESS;
}
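/*
 * PF bring-up sequence implemented above (summary inferred from the
 * code, not normative documentation):
 *
 *	hpre_disable_clock_gate()  - stop dynamic gating while configuring
 *	hisi_qm_dev_init()         - common queue management setup
 *	io_write32() series        - cache, endian, RAS and user-field config
 *	poll HPRE_RDCHN_INI_ST     - wait for the read channel to initialize
 *	hpre_set_cluster()         - enable cluster cores and poll for ready
 *	hpre_enable_clock_gate()   - restore clock gating once stable
 */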
static enum hisi_drv_status hpre_dev_status_check(struct hisi_qm *qm)
{
	uint32_t val = 0;

	val = io_read32(qm->io_base + HPRE_HAC_INT_SRC);
	if (val & HPRE_RAS_NFE_MASK) {
		EMSG("HPRE NFE RAS happened, need to reset");
		return HISI_QM_DRVCRYPT_HW_EACCESS;
	}

	val = io_read32(qm->io_base + HISI_QM_ABNML_INT_SRC);
	if (val) {
		if (val & HISI_QM_HPRE_NFE_INT_MASK)
			EMSG("QM NFE RAS happened, need to reset");

		if (val & HISI_QM_INVALID_DB) {
			EMSG("QM invalid db happened, please check");
			io_write32(qm->io_base + HISI_QM_ABNML_INT_SRC,
				   HISI_QM_INVALID_DB);
		}

		return HISI_QM_DRVCRYPT_HW_EACCESS;
	}

	return HISI_QM_DRVCRYPT_NO_ERR;
}

static enum hisi_drv_status hpre_qm_init(struct acc_device *hpre_dev)
{
	struct hisi_qm *qm = &hpre_dev->qm;

	if (cpu_mmu_enabled()) {
		qm->io_base = (uintptr_t)phys_to_virt_io(hpre_dev->io_base,
							 hpre_dev->io_size);
		if (!qm->io_base) {
			EMSG("Fail to get qm io_base");
			return HISI_QM_DRVCRYPT_EFAULT;
		}
	} else {
		qm->io_base = hpre_dev->io_base;
	}

	qm->vfs_num = hpre_dev->vfs_num;
	qm->fun_type = hpre_dev->fun_type;
	qm->sqe_size = HPRE_SQE_SIZE;
	qm->sqe_log2_size = HPRE_SQE_LOG2_SIZE;
	if (qm->fun_type == HISI_QM_HW_PF) {
		hisi_qm_get_version(qm);
		DMSG("HPRE hardware version is 0x%"PRIx32, qm->version);
		qm->qp_base = HISI_QM_PF_Q_BASE;
		qm->qp_num = HISI_QM_PF_Q_NUM;
		qm->dev_status_check = hpre_dev_status_check;
	}

	return hisi_qm_init(qm);
}

static struct acc_device *hpre_pre_init(void)
{
	struct acc_device *hpre_dev = NULL;

	hpre_dev = calloc(1, sizeof(*hpre_dev));
	if (!hpre_dev) {
		EMSG("Fail to alloc hpre_dev");
		return NULL;
	}

	hpre_dev->io_base = HPRE_BAR_BASE;
	hpre_dev->io_size = HPRE_BAR_SIZE;
	hpre_dev->fun_type = HISI_QM_HW_PF;
	SLIST_INSERT_HEAD(&hpre_list, hpre_dev, link);

	return hpre_dev;
}

static TEE_Result hpre_probe(void)
{
	TEE_Result ret = TEE_ERROR_GENERIC;
	struct acc_device *hpre_dev = NULL;
	struct hisi_qm *qm = NULL;

	DMSG("HPRE driver init start");
	hpre_dev = hpre_pre_init();
	if (!hpre_dev)
		return TEE_ERROR_OUT_OF_MEMORY;

	qm = &hpre_dev->qm;
	if (hpre_qm_init(hpre_dev)) {
		EMSG("Fail to init hpre qm");
		goto err_with_pre_init;
	}

	ret = hpre_engine_init(hpre_dev);
	if (ret) {
		EMSG("Fail to init engine");
		goto err_with_qm_init;
	}

	if (hisi_qm_start(qm)) {
		EMSG("Fail to start qm");
		ret = TEE_ERROR_BAD_STATE;
		goto err_with_qm_init;
	}

	DMSG("HPRE driver init done");

	return TEE_SUCCESS;

err_with_qm_init:
	hisi_qm_uninit(qm);
err_with_pre_init:
	SLIST_REMOVE_HEAD(&hpre_list, link);
	free(hpre_dev);

	return ret;
}

driver_init(hpre_probe);
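/*
 * Note (descriptive addition): driver_init() registers hpre_probe() as
 * an OP-TEE driver-level initcall, so the QM and engine setup above run
 * once at boot. After a successful probe, client drivers call
 * hpre_create_qp() to get a queue pair from the registered device with
 * the most free queues.
 */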