// SPDX-License-Identifier: BSD-2-Clause
/*
 * Copyright 2022-2023 HiSilicon Limited.
 * Kunpeng hardware accelerator queue management module.
 */
#include "hisi_qm.h"

#define QM_FVT_CFG_RDY_BIT	0x1
/* Doorbell */
#define QM_DOORBELL_SQ_CQ_BASE	0x1000
#define QM_DB_CMD_SHIFT		12
#define QM_DB_RAND_DATA_SHIFT	16
#define QM_DB_INDEX_SHIFT	32
#define QM_DB_PRIORITY_SHIFT	48
#define QM_DB_RAND_DATA		0x5a
#define QM_DOORBELL_CMD_SQ	0
#define QM_DOORBELL_CMD_CQ	1
/* Mailbox */
#define QM_MAILBOX_BASE		0x300
#define QM_MAILBOX_DATA_ADDR_L	0x304
#define QM_MAILBOX_DATA_ADDR_H	0x308
#define QM_MB_BUSY_SHIFT	13
#define QM_MB_BUSY_BIT		BIT32(QM_MB_BUSY_SHIFT)
#define QM_MB_OP_SHIFT		14
#define QM_MB_OP_WR		0
#define QM_MB_OP_RD		1
/* XQC_VFT */
#define QM_VFT_CFG_OP_ENABLE	0x100054
#define QM_VFT_CFG_OP_WR	0x100058
#define QM_VFT_CFG_TYPE		0x10005c
#define QM_VFT_CFG_ADDRESS	0x100060
#define QM_VFT_CFG_DATA_L	0x100064
#define QM_VFT_CFG_DATA_H	0x100068
#define QM_VFT_CFG_RDY		0x10006c
#define QM_SQC_VFT		0
#define QM_CQC_VFT		1
#define QM_SQC_VFT_START_SQN_SHIFT 28
#define QM_SQC_VFT_VALID	BIT64(44)
#define QM_SQC_VFT_SQ_NUM_SHIFT 45
#define QM_CQC_VFT_VALID	BIT(28)
#define QM_VFT_WRITE		0
#define QM_VFT_READ		1
#define QM_SQC_VFT_BASE_MASK	0x3ff
#define QM_SQC_VFT_NUM_MASK	0x3ff
/* QM INIT */
#define QM_MEM_START_INIT	0x100040
#define QM_MEM_INIT_DONE	0x100044
#define QM_VF_AEQ_INT_MASK	0x4
#define QM_VF_AEQ_INT_MASK_EN	0x1
#define QM_VF_EQ_INT_MASK	0xc
#define QM_VF_EQ_INT_MASK_EN	0x1
#define QM_ARUSER_M_CFG_1	0x100088
#define QM_ARUSER_M_CFG_ENABLE	0x100090
#define QM_AWUSER_M_CFG_1	0x100098
#define QM_AWUSER_M_CFG_ENABLE	0x1000a0
#define QM_AXUSER_CFG		0x40001070
#define AXUSER_M_CFG_ENABLE	0x7ffffc
#define QM_AXI_M_CFG		0x1000ac
#define AXI_M_CFG		0xffff
#define QM_PEH_AXUSER_CFG	0x1000cc
#define PEH_AXUSER_CFG		0x400801
#define QM_CACHE_CTL		0x100050
#define QM_CACHE_CFG		0x4893
#define QM_CACHE_WB_START	0x204
#define QM_CACHE_WB_DONE	0x208
/* XQC shift */
#define QM_SQ_SQE_SIZE_SHIFT	12
#define QM_SQ_ORDER_SHIFT	4
#define QM_SQ_TYPE_SHIFT	8
#define QM_CQE_SIZE		4
#define QM_CQ_CQE_SIZE_SHIFT	12
/* CQE */
#define QM_CQE_PHASE(cqe) (((cqe)->w7) & QM_FVT_CFG_RDY_BIT)

enum qm_mailbox_common_cmd {
	QM_MB_CMD_SQC = 0x0,
	QM_MB_CMD_CQC,
	QM_MB_CMD_EQC,
	QM_MB_CMD_AEQC,
	QM_MB_CMD_SQC_BT,
	QM_MB_CMD_CQC_BT,
	QM_MB_CMD_SQC_VFT,
};

enum qm_mailbox_cmd_v3 {
	QM_MB_CMD_CLOSE_QM = 0x7,
	QM_MB_CMD_CLOSE_QP,
	QM_MB_CMD_FLUSH_QM,
	QM_MB_CMD_FLUSH_QP,
	QM_MB_CMD_SRC = 0xc,
	QM_MB_CMD_DST,
	QM_MB_CMD_STOP_QM,
};

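/*
 * Layout of the 128-bit mailbox message. The named fields and the
 * x[2] view alias the same storage, so a message can be built field
 * by field and then written to hardware as two 64-bit words.
 */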
struct qm_mailbox {
	union {
		struct {
			uint16_t w0;
			uint16_t queue;
			uint32_t base_l;
			uint32_t base_h;
			uint32_t token;
		};
		uint64_t x[2];
	};
};

struct qm_dfx_registers {
	const char *reg_name;
	uint32_t reg_offset;
};

static const struct qm_dfx_registers qm_dfx_regs[] = {
	{ .reg_name = "QM_ECC_1BIT_CNT           ", .reg_offset = 0x104000 },
	{ .reg_name = "QM_ECC_MBIT_CNT           ", .reg_offset = 0x104008 },
	{ .reg_name = "QM_DFX_MB_CNT             ", .reg_offset = 0x104018 },
	{ .reg_name = "QM_DFX_DB_CNT             ", .reg_offset = 0x104028 },
	{ .reg_name = "QM_DFX_SQE_CNT            ", .reg_offset = 0x104038 },
	{ .reg_name = "QM_DFX_CQE_CNT            ", .reg_offset = 0x104048 },
	{ .reg_name = "QM_DFX_SEND_SQE_TO_ACC_CNT", .reg_offset = 0x104050 },
	{ .reg_name = "QM_DFX_WB_SQE_FROM_ACC_CNT", .reg_offset = 0x104058 },
	{ .reg_name = "QM_DFX_ACC_FINISH_CNT     ", .reg_offset = 0x104060 },
	{ .reg_name = "QM_DFX_CQE_ERR_CNT        ", .reg_offset = 0x1040b4 },
	{ }
};

void hisi_qm_get_version(struct hisi_qm *qm)
{
	qm->version = io_read32(qm->io_base + HISI_QM_REVISON_ID_BASE) &
		      HISI_QM_REVISON_ID_MASK;
}

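/*
 * Ring a doorbell to tell hardware about a new SQ tail or CQ head.
 * The 64-bit doorbell word packs the queue number, command type,
 * fixed random data, ring index and priority.
 */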
static void qm_db(struct hisi_qm *qm, uint16_t qn, uint8_t cmd, uint16_t index,
		  uint8_t priority)
{
	uint64_t doorbell = 0;

	doorbell = qn | SHIFT_U64(cmd, QM_DB_CMD_SHIFT) |
		   SHIFT_U64(QM_DB_RAND_DATA, QM_DB_RAND_DATA_SHIFT) |
		   SHIFT_U64(index, QM_DB_INDEX_SHIFT) |
		   SHIFT_U64(priority, QM_DB_PRIORITY_SHIFT);

	io_write64(qm->io_base + QM_DOORBELL_SQ_CQ_BASE, doorbell);
}

static enum hisi_drv_status qm_wait_mb_ready(struct hisi_qm *qm)
{
	uint32_t val = 0;

	/*
	 * Return HISI_QM_DRVCRYPT_NO_ERR when the mailbox is ready,
	 * HISI_QM_DRVCRYPT_ETMOUT on hardware timeout.
	 */
	if (IO_READ32_POLL_TIMEOUT(qm->io_base + QM_MAILBOX_BASE, val,
				   !(val & QM_MB_BUSY_BIT), POLL_PERIOD,
				   POLL_TIMEOUT)) {
		return HISI_QM_DRVCRYPT_ETMOUT;
	}

	return HISI_QM_DRVCRYPT_NO_ERR;
}

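/*
 * Write the whole 128-bit mailbox message to the mailbox registers.
 * write_64bit_pair() is relied on to issue the two 64-bit halves
 * back to back so that hardware sees a consistent message.
 */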
static void qm_mb_write(struct hisi_qm *qm, struct qm_mailbox *mb)
{
	vaddr_t dst = qm->io_base + QM_MAILBOX_BASE;

	write_64bit_pair(dst, mb->x[0], mb->x[1]);
}

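/*
 * Run one mailbox transaction: wait until the mailbox is idle, post
 * the command with the busy bit set, then wait for hardware to clear
 * the busy bit again before returning.
 */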
static enum hisi_drv_status qm_mb(struct hisi_qm *qm, uint8_t cmd,
				  vaddr_t dma_addr, uint16_t qn, uint8_t op)
{
	struct qm_mailbox mb = { };

	mb.w0 = cmd | SHIFT_U32(op, QM_MB_OP_SHIFT) |
		BIT32(QM_MB_BUSY_SHIFT);
	mb.queue = qn;
	reg_pair_from_64(dma_addr, &mb.base_h, &mb.base_l);
	mb.token = 0;

	if (qm_wait_mb_ready(qm)) {
		EMSG("QM mailbox is busy");
		return HISI_QM_DRVCRYPT_EBUSY;
	}

	qm_mb_write(qm, &mb);

	if (qm_wait_mb_ready(qm)) {
		EMSG("QM mailbox operation timeout");
		return HISI_QM_DRVCRYPT_EBUSY;
	}

	return HISI_QM_DRVCRYPT_NO_ERR;
}

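/*
 * Build the VFT entry to be programmed. An SQC VFT entry encodes the
 * first SQ number, the valid bit and the SQ count; a CQC VFT entry
 * only needs the valid bit.
 */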
static void qm_cfg_vft_data(struct hisi_qm *qm, uint8_t vft_type,
			    uint32_t base, uint32_t number)
{
	uint32_t data_h = 0;
	uint32_t data_l = 0;
	uint64_t data = 0;

	switch (vft_type) {
	case QM_SQC_VFT:
		data = SHIFT_U64(base, QM_SQC_VFT_START_SQN_SHIFT) |
			QM_SQC_VFT_VALID |
			SHIFT_U64((number - 1), QM_SQC_VFT_SQ_NUM_SHIFT);
		break;
	case QM_CQC_VFT:
		data = QM_CQC_VFT_VALID;
		break;
	default:
		panic("Invalid vft type");
	}

	reg_pair_from_64(data, &data_h, &data_l);
	io_write32(qm->io_base + QM_VFT_CFG_DATA_L, data_l);
	io_write32(qm->io_base + QM_VFT_CFG_DATA_H, data_h);
}

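/*
 * Program one VFT entry for the given function: wait for the VFT
 * block to be ready, latch the operation type, address and data,
 * kick the operation and wait for it to complete.
 */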
static enum hisi_drv_status qm_set_vft_common(struct hisi_qm *qm,
					      uint8_t vft_type,
					      uint32_t function,
					      uint32_t base,
					      uint32_t num)
{
	uint32_t val = 0;

	if (IO_READ32_POLL_TIMEOUT(qm->io_base + QM_VFT_CFG_RDY, val,
				   val & QM_FVT_CFG_RDY_BIT, POLL_PERIOD,
				   POLL_TIMEOUT)) {
		EMSG("QM VFT is not ready");
		return HISI_QM_DRVCRYPT_EBUSY;
	}

	io_write32(qm->io_base + QM_VFT_CFG_OP_WR, QM_VFT_WRITE);
	io_write32(qm->io_base + QM_VFT_CFG_TYPE, vft_type);
	io_write32(qm->io_base + QM_VFT_CFG_ADDRESS, function);
	qm_cfg_vft_data(qm, vft_type, base, num);
	io_write32(qm->io_base + QM_VFT_CFG_RDY, 0x0);
	io_write32(qm->io_base + QM_VFT_CFG_OP_ENABLE, QM_FVT_CFG_RDY_BIT);

	if (IO_READ32_POLL_TIMEOUT(qm->io_base + QM_VFT_CFG_RDY, val,
				   val & QM_FVT_CFG_RDY_BIT, POLL_PERIOD,
				   POLL_TIMEOUT)) {
		EMSG("QM VFT is not ready");
		return HISI_QM_DRVCRYPT_EBUSY;
	}

	return HISI_QM_DRVCRYPT_NO_ERR;
}

static enum hisi_drv_status qm_set_xqc_vft(struct hisi_qm *qm,
					   uint32_t function,
					   uint32_t base, uint32_t num)
{
	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;
	int i = 0;

	if (!num) {
		EMSG("Invalid sq num");
		return HISI_QM_DRVCRYPT_EINVAL;
	}

	for (i = QM_SQC_VFT; i <= QM_CQC_VFT; i++) {
		ret = qm_set_vft_common(qm, i, function, base, num);
		if (ret) {
			EMSG("QM set type %d fail", i);
			return ret;
		}
	}

	return HISI_QM_DRVCRYPT_NO_ERR;
}

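/*
 * Read back this function's SQC VFT entry through the mailbox and
 * decode the queue base and queue number assigned to it.
 */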
static enum hisi_drv_status qm_get_vft(struct hisi_qm *qm, uint32_t *base,
				       uint32_t *num)
{
	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;
	uint64_t sqc_vft = 0;

	ret = qm_mb(qm, QM_MB_CMD_SQC_VFT, 0, 0, QM_MB_OP_RD);
	if (ret)
		return ret;

	sqc_vft = io_read64(qm->io_base + QM_MAILBOX_DATA_ADDR_L);
	*base = (sqc_vft >> QM_SQC_VFT_START_SQN_SHIFT) & QM_SQC_VFT_BASE_MASK;
	*num = ((sqc_vft >> QM_SQC_VFT_SQ_NUM_SHIFT) & QM_SQC_VFT_NUM_MASK) + 1;

	return HISI_QM_DRVCRYPT_NO_ERR;
}

static void qp_free(struct hisi_qm *qm, uint32_t id)
{
	struct hisi_qp *qp = &qm->qp_array[id];

	free(qp->sqe);
	free(qp->cqe);
}

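/*
 * Allocate the SQE and CQE rings of one queue pair and record their
 * physical addresses for later context programming.
 */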
static enum hisi_drv_status qp_alloc(struct hisi_qm *qm, uint32_t id)
{
	size_t sq_size = qm->sqe_size * HISI_QM_Q_DEPTH;
	size_t cq_size = sizeof(struct qm_cqe) * HISI_QM_Q_DEPTH;
	struct hisi_qp *qp = &qm->qp_array[id];
	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;

	qp->sqe = memalign(HISI_QM_ALIGN128, sq_size);
	if (!qp->sqe) {
		EMSG("Fail to malloc sq[%"PRIu32"]", id);
		return HISI_QM_DRVCRYPT_ENOMEM;
	}
	qp->sqe_dma = virt_to_phys(qp->sqe);
	qp->cqe = memalign(HISI_QM_ALIGN32, cq_size);
	if (!qp->cqe) {
		EMSG("Fail to malloc cq[%"PRIu32"]", id);
		ret = HISI_QM_DRVCRYPT_ENOMEM;
		goto free_sqe;
	}
	qp->cqe_dma = virt_to_phys(qp->cqe);

	qp->qp_id = id;
	qp->qm = qm;
	return HISI_QM_DRVCRYPT_NO_ERR;

free_sqe:
	free(qp->sqe);
	return ret;
}

static void qm_free(struct hisi_qm *qm)
{
	unsigned int i = 0;

	for (i = 0; i < qm->qp_num; i++)
		qp_free(qm, i);

	free(qm->qp_array);
	free(qm->sqc);
	free(qm->cqc);
}

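/*
 * Allocate the SQC/CQC context tables and every queue pair. On
 * failure, all partially allocated resources are released in reverse
 * order.
 */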
static enum hisi_drv_status qm_alloc(struct hisi_qm *qm)
{
	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;
	size_t sqc_size = 0;
	size_t cqc_size = 0;
	unsigned int i = 0;
	int j = 0;

	sqc_size = sizeof(struct qm_sqc) * qm->qp_num;
	cqc_size = sizeof(struct qm_cqc) * qm->qp_num;

	qm->sqc = memalign(HISI_QM_ALIGN32, sqc_size);
	if (!qm->sqc) {
		EMSG("Fail to malloc sqc");
		return HISI_QM_DRVCRYPT_ENOMEM;
	}
	memset(qm->sqc, 0, sqc_size);
	qm->sqc_dma = virt_to_phys(qm->sqc);

	qm->cqc = memalign(HISI_QM_ALIGN32, cqc_size);
	if (!qm->cqc) {
		EMSG("Fail to malloc cqc");
		ret = HISI_QM_DRVCRYPT_ENOMEM;
		goto free_sqc;
	}
	memset(qm->cqc, 0, cqc_size);
	qm->cqc_dma = virt_to_phys(qm->cqc);

	qm->qp_array = calloc(qm->qp_num, sizeof(struct hisi_qp));
	if (!qm->qp_array) {
		EMSG("Fail to malloc qp_array");
		ret = HISI_QM_DRVCRYPT_ENOMEM;
		goto free_cqc;
	}

	for (i = 0; i < qm->qp_num; i++) {
		ret = qp_alloc(qm, i);
		if (ret)
			goto free_qp_mem;
	}

	return HISI_QM_DRVCRYPT_NO_ERR;

free_qp_mem:
	for (j = (int)i - 1; j >= 0; j--)
		qp_free(qm, j);
	free(qm->qp_array);
free_cqc:
	free(qm->cqc);
free_sqc:
	free(qm->sqc);
	return ret;
}

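/*
 * Initialize the QM software state. A VF first queries its queue
 * range from the VFT set up by the PF; then the queue memory is
 * allocated and the queue-pair bookkeeping is reset.
 */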
enum hisi_drv_status hisi_qm_init(struct hisi_qm *qm)
{
	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;

	if (qm->fun_type == HISI_QM_HW_VF) {
		ret = qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
		if (ret) {
			EMSG("Fail to get function vft config");
			return ret;
		}
	}

	if (!qm->qp_num || !qm->sqe_size) {
		EMSG("Invalid QM parameters");
		return HISI_QM_DRVCRYPT_EINVAL;
	}

	ret = qm_alloc(qm);
	if (ret)
		return ret;

	qm->qp_in_used = 0;
	qm->qp_idx = 0;
	mutex_init(&qm->qp_lock);

	return HISI_QM_DRVCRYPT_NO_ERR;
}

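/*
 * Flush the QM internal caches back to memory and wait for the
 * hardware to signal completion.
 */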
static void qm_cache_writeback(struct hisi_qm *qm)
{
	uint32_t val = 0;

	io_write32(qm->io_base + QM_CACHE_WB_START, QM_FVT_CFG_RDY_BIT);

	if (IO_READ32_POLL_TIMEOUT(qm->io_base + QM_CACHE_WB_DONE, val,
				   val & QM_FVT_CFG_RDY_BIT, POLL_PERIOD,
				   POLL_TIMEOUT))
		panic("QM writeback sqc cache fail");
}

void hisi_qm_uninit(struct hisi_qm *qm)
{
	qm_cache_writeback(qm);
	qm_free(qm);
	mutex_destroy(&qm->qp_lock);
}

static enum hisi_drv_status qm_hw_mem_reset(struct hisi_qm *qm)
{
	uint32_t val = 0;

	io_write32(qm->io_base + QM_MEM_START_INIT, QM_FVT_CFG_RDY_BIT);

	if (IO_READ32_POLL_TIMEOUT(qm->io_base + QM_MEM_INIT_DONE, val,
				   val & QM_FVT_CFG_RDY_BIT, POLL_PERIOD,
				   POLL_TIMEOUT))
		return HISI_QM_DRVCRYPT_EBUSY;

	return HISI_QM_DRVCRYPT_NO_ERR;
}

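/*
 * Distribute queues to the virtual functions: each VF gets
 * HISI_QM_VF_Q_NUM queues starting right after the PF's range. If
 * programming one VF fails, the loop tries to roll back the entries
 * written so far.
 */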
static enum hisi_drv_status qm_func_vft_cfg(struct hisi_qm *qm)
{
	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;
	uint32_t q_base = qm->qp_num;
	uint32_t act_q_num = 0;
	unsigned int i = 0;
	unsigned int j = 0;

	if (!qm->vfs_num)
		return HISI_QM_DRVCRYPT_NO_ERR;

	if (qm->vfs_num > HISI_QM_MAX_VFS_NUM) {
		EMSG("Invalid QM vfs_num");
		return HISI_QM_DRVCRYPT_EINVAL;
	}

	for (i = 1; i <= qm->vfs_num; i++) {
		act_q_num = HISI_QM_VF_Q_NUM;
		ret = qm_set_xqc_vft(qm, i, q_base, act_q_num);
		if (ret) {
			for (j = 1; j < i; j++)
				(void)qm_set_xqc_vft(qm, j, 0, 0);
			return ret;
		}
		q_base += act_q_num;
	}

	return HISI_QM_DRVCRYPT_NO_ERR;
}

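/*
 * Start the QM. The PF additionally resets the hardware memory and
 * programs the PF/VF VFT entries; both PF and VF then publish their
 * SQC/CQC base tables and mask the AEQ/EQ interrupts.
 */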
enum hisi_drv_status hisi_qm_start(struct hisi_qm *qm)
{
	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;

	if (qm->fun_type == HISI_QM_HW_PF) {
		ret = qm_hw_mem_reset(qm);
		if (ret) {
			EMSG("Fail to reset QM hardware mem");
			return ret;
		}

		ret = qm_set_xqc_vft(qm, 0, qm->qp_base, qm->qp_num);
		if (ret) {
			EMSG("Fail to set PF xqc_vft");
			return ret;
		}

		ret = qm_func_vft_cfg(qm);
		if (ret) {
			EMSG("Fail to set VF xqc_vft");
			return ret;
		}
	}

	ret = qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, QM_MB_OP_WR);
	if (ret) {
		EMSG("Fail to set sqc_bt");
		return ret;
	}

	ret = qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, QM_MB_OP_WR);
	if (ret) {
		EMSG("Fail to set cqc_bt");
		return ret;
	}

	/* Security mode does not support MSI, so mask the AEQ/EQ interrupts */
	io_write32(qm->io_base + QM_VF_AEQ_INT_MASK, QM_VF_AEQ_INT_MASK_EN);
	io_write32(qm->io_base + QM_VF_EQ_INT_MASK, QM_VF_EQ_INT_MASK_EN);

	return HISI_QM_DRVCRYPT_NO_ERR;
}

void hisi_qm_dev_init(struct hisi_qm *qm)
{
	if (qm->fun_type == HISI_QM_HW_VF)
		return;

	/* QM user domain */
	io_write32(qm->io_base + QM_ARUSER_M_CFG_1, QM_AXUSER_CFG);
	io_write32(qm->io_base + QM_ARUSER_M_CFG_ENABLE, AXUSER_M_CFG_ENABLE);
	io_write32(qm->io_base + QM_AWUSER_M_CFG_1, QM_AXUSER_CFG);
	io_write32(qm->io_base + QM_AWUSER_M_CFG_ENABLE, AXUSER_M_CFG_ENABLE);
	/* QM cache */
	io_write32(qm->io_base + QM_AXI_M_CFG, AXI_M_CFG);

	if (qm->version == HISI_QM_HW_V2) {
		/* Disable FLR triggered by BME (bus master enable) */
		io_write32(qm->io_base + QM_PEH_AXUSER_CFG, PEH_AXUSER_CFG);
		/* Set the SQC and CQC cache write-back threshold to 4 */
		io_write32(qm->io_base + QM_CACHE_CTL, QM_CACHE_CFG);
	}
	/* Disable QM RAS */
	io_write32(qm->io_base + HISI_QM_ABNML_INT_MASK,
		   HISI_QM_ABNML_INT_MASK_CFG);
}

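/*
 * Fill an SQC context (ring base, depth, SQE size, bound CQ and SQ
 * type) in a DMA-able buffer and hand it to hardware with the SQC
 * mailbox command.
 */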
static enum hisi_drv_status qm_sqc_cfg(struct hisi_qp *qp)
{
	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;
	struct hisi_qm *qm = qp->qm;
	struct qm_sqc *sqc = NULL;
	paddr_t sqc_dma = 0;

	sqc = memalign(HISI_QM_ALIGN32, sizeof(struct qm_sqc));
	if (!sqc)
		return HISI_QM_DRVCRYPT_ENOMEM;

	sqc_dma = virt_to_phys(sqc);

	memset(sqc, 0, sizeof(struct qm_sqc));
	reg_pair_from_64(qp->sqe_dma, &sqc->base_h, &sqc->base_l);
	sqc->dw3 = (HISI_QM_Q_DEPTH - 1) |
		    SHIFT_U32(qm->sqe_log2_size, QM_SQ_SQE_SIZE_SHIFT);
	sqc->rand_data = QM_DB_RAND_DATA;
	sqc->cq_num = qp->qp_id;
	sqc->w13 = BIT32(QM_SQ_ORDER_SHIFT) |
		   SHIFT_U32(qp->sq_type, QM_SQ_TYPE_SHIFT);

	ret = qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp->qp_id, QM_MB_OP_WR);
	free(sqc);

	return ret;
}

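/*
 * Fill a CQC context (ring base, depth, CQE size and the initial
 * phase value) in a DMA-able buffer and hand it to hardware with the
 * CQC mailbox command.
 */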
static enum hisi_drv_status qm_cqc_cfg(struct hisi_qp *qp)
{
	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;
	struct hisi_qm *qm = qp->qm;
	struct qm_cqc *cqc = NULL;
	paddr_t cqc_dma = 0;

	cqc = memalign(HISI_QM_ALIGN32, sizeof(struct qm_cqc));
	if (!cqc)
		return HISI_QM_DRVCRYPT_ENOMEM;

	cqc_dma = virt_to_phys(cqc);

	memset(cqc, 0, sizeof(struct qm_cqc));
	reg_pair_from_64(qp->cqe_dma, &cqc->base_h, &cqc->base_l);
	cqc->dw3 = (HISI_QM_Q_DEPTH - 1) |
		    SHIFT_U32(QM_CQE_SIZE, QM_CQ_CQE_SIZE_SHIFT);
	cqc->rand_data = QM_DB_RAND_DATA;
	cqc->dw6 = PHASE_DEFAULT_VAL;

	ret = qm_mb(qm, QM_MB_CMD_CQC, cqc_dma, qp->qp_id, QM_MB_OP_WR);
	free(cqc);

	return ret;
}

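/*
 * Pick the next free queue pair in round-robin order, reset its ring
 * state and program its SQC/CQC contexts. Returns NULL if all queue
 * pairs are in use or the context setup fails.
 */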
struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, uint8_t sq_type)
{
	struct hisi_qp *qp = NULL;

	mutex_lock(&qm->qp_lock);
	if (qm->qp_in_used == qm->qp_num) {
		EMSG("All %"PRIu32" queues of QM are busy", qm->qp_num);
		goto err_proc;
	}

	if (qm->qp_idx == qm->qp_num - 1)
		qm->qp_idx = 0;
	else
		qm->qp_idx++;

	qp = &qm->qp_array[qm->qp_idx];
	memset(qp->cqe, 0, sizeof(struct qm_cqe) * HISI_QM_Q_DEPTH);
	qp->sq_type = sq_type;
	qp->sq_tail = 0;
	qp->cq_head = 0;
	qp->cqc_phase = true;

	if (qm_sqc_cfg(qp)) {
		EMSG("Fail to set qp[%"PRIu32"] sqc", qp->qp_id);
		goto err_proc;
	}

	if (qm_cqc_cfg(qp)) {
		EMSG("Fail to set qp[%"PRIu32"] cqc", qp->qp_id);
		goto err_proc;
	}

	qm->qp_in_used++;
	mutex_unlock(&qm->qp_lock);
	return qp;

err_proc:
	/* qp is still NULL when no queue pair was available */
	if (qp) {
		qp->sq_type = 0;
		qp->cqc_phase = false;
	}
	mutex_unlock(&qm->qp_lock);
	return NULL;
}

void hisi_qm_release_qp(struct hisi_qp *qp)
{
	struct hisi_qm *qm = NULL;

	if (!qp) {
		EMSG("QP is NULL");
		return;
	}

	qm = qp->qm;
	mutex_lock(&qm->qp_lock);
	qm->qp_in_used--;
	mutex_unlock(&qm->qp_lock);
}

static void qm_sq_tail_update(struct hisi_qp *qp)
{
	if (qp->sq_tail == HISI_QM_Q_DEPTH - 1)
		qp->sq_tail = 0;
	else
		qp->sq_tail++;
}

/*
 * Each task thread is bound to exactly one hardware queue, and the
 * hardware does not support MSI, so no lock is needed here.
 */
enum hisi_drv_status hisi_qp_send(struct hisi_qp *qp, void *msg)
{
	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;
	struct hisi_qm *qm = NULL;
	void *sqe = NULL;

	if (!qp) {
		EMSG("QP is NULL");
		return HISI_QM_DRVCRYPT_EINVAL;
	}

	qm = qp->qm;
	ret = qm->dev_status_check(qm);
	if (ret)
		return ret;

	sqe = (void *)((vaddr_t)qp->sqe + qm->sqe_size * qp->sq_tail);
	memset(sqe, 0, qm->sqe_size);

	ret = qp->fill_sqe(sqe, msg);
	if (ret) {
		EMSG("Fail to fill sqe");
		return ret;
	}

	qm_sq_tail_update(qp);

	dsb();
	qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_SQ, qp->sq_tail, 0);

	return HISI_QM_DRVCRYPT_NO_ERR;
}

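/*
 * Advance the CQ head. Wrapping around the ring toggles the expected
 * phase bit, which is how valid CQEs of the current pass are told
 * apart from stale ones.
 */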
static void qm_cq_head_update(struct hisi_qp *qp)
{
	if (qp->cq_head == HISI_QM_Q_DEPTH - 1) {
		qp->cqc_phase = !qp->cqc_phase;
		qp->cq_head = 0;
	} else {
		qp->cq_head++;
	}
}

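/*
 * Poll the current CQE once. If its phase bit matches the expected
 * phase, the matching SQE is parsed, the CQ doorbell is rung and
 * HISI_QM_DRVCRYPT_RECV_DONE is returned; otherwise nothing has
 * completed yet.
 */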
static enum hisi_drv_status hisi_qp_recv(struct hisi_qp *qp, void *msg)
{
	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;
	struct hisi_qm *qm = qp->qm;
	struct qm_cqe *cqe = NULL;
	void *sqe = NULL;

	ret = qm->dev_status_check(qm);
	if (ret)
		return ret;

	cqe = qp->cqe + qp->cq_head;
	if (QM_CQE_PHASE(cqe) != qp->cqc_phase)
		return HISI_QM_DRVCRYPT_NO_ERR;

	dsb_osh();
	sqe = (void *)((vaddr_t)qp->sqe + qm->sqe_size * cqe->sq_head);
	ret = qp->parse_sqe(sqe, msg);
	qm_cq_head_update(qp);
	qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, qp->cq_head, 0);
	if (ret) {
		EMSG("Fail to parse sqe");
		return ret;
	}

	return HISI_QM_DRVCRYPT_RECV_DONE;
}

static void qm_dfx_dump(struct hisi_qm *qm)
{
	const struct qm_dfx_registers *regs = qm_dfx_regs;
	__maybe_unused uint32_t val = 0;

	if (qm->fun_type == HISI_QM_HW_VF)
		return;

	while (regs->reg_name) {
		val = io_read32(qm->io_base + regs->reg_offset);
		EMSG("%s= 0x%" PRIx32, regs->reg_name, val);
		regs++;
	}
}

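/*
 * Busy-poll the completion queue until one task finishes or the
 * retry budget HISI_QM_RECV_SYNC_TIMEOUT is exhausted. On timeout or
 * error, the DFX counters are dumped to help diagnosis.
 */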
enum hisi_drv_status hisi_qp_recv_sync(struct hisi_qp *qp, void *msg)
{
	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;
	uint32_t cnt = 0;

	if (!qp) {
		EMSG("QP is NULL");
		return HISI_QM_DRVCRYPT_EINVAL;
	}

	while (true) {
		ret = hisi_qp_recv(qp, msg);
		if (ret == HISI_QM_DRVCRYPT_RECV_DONE)
			return HISI_QM_DRVCRYPT_NO_ERR;

		if (ret != HISI_QM_DRVCRYPT_NO_ERR) {
			EMSG("QM recv task error");
			qm_dfx_dump(qp->qm);
			return ret;
		}

		cnt++;
		if (cnt > HISI_QM_RECV_SYNC_TIMEOUT) {
			EMSG("QM recv task timeout");
			qm_dfx_dump(qp->qm);
			return HISI_QM_DRVCRYPT_ETMOUT;
		}
	}
}
792