xref: /optee_os/core/drivers/crypto/hisilicon/hisi_qm.c (revision f77e5952a87d26326ea0073086f77a101de47666)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright 2022-2023 HiSilicon Limited.
4  * Kunpeng hardware accelerator queue management module.
5  */
6 #include "hisi_qm.h"
7 
8 #define QM_FVT_CFG_RDY_BIT	0x1
9 /* Doorbell */
10 #define QM_DOORBELL_SQ_CQ_BASE	0x1000
11 #define QM_DB_CMD_SHIFT		12
12 #define QM_DB_RAND_DATA_SHIFT	16
13 #define QM_DB_INDEX_SHIFT	32
14 #define QM_DB_PRIORITY_SHIFT	48
15 #define QM_DB_RAND_DATA		0x5a
16 #define QM_DOORBELL_CMD_SQ	0
17 #define QM_DOORBELL_CMD_CQ	1
18 /* Mailbox */
19 #define QM_MAILBOX_BASE		0x300
20 #define QM_MAILBOX_DATA_ADDR_L	0x304
21 #define QM_MAILBOX_DATA_ADDR_H	0x308
22 #define QM_MB_BUSY_SHIFT	13
23 #define QM_MB_BUSY_BIT		BIT32(QM_MB_BUSY_SHIFT)
24 #define QM_MB_OP_SHIFT		14
25 #define QM_MB_OP_WR		0
26 #define QM_MB_OP_RD		1
27 #define QM_MB_STATUS_MASK	GENMASK_32(12, 9)
28 #define QM_MB_WAIT_READY_CNT	10
29 #define QM_MB_WAIT_MAX_CNT	21000
30 #define QM_MB_WAIT_PERIOD	200
31 /* XQC_VFT */
32 #define QM_VFT_CFG_OP_ENABLE	0x100054
33 #define QM_VFT_CFG_OP_WR	0x100058
34 #define QM_VFT_CFG_TYPE		0x10005c
35 #define QM_VFT_CFG_ADDRESS	0x100060
36 #define QM_VFT_CFG_DATA_L	0x100064
37 #define QM_VFT_CFG_DATA_H	0x100068
38 #define QM_VFT_CFG_RDY		0x10006c
39 #define QM_SQC_VFT		0
40 #define QM_CQC_VFT		1
41 #define QM_SQC_VFT_START_SQN_SHIFT 28
42 #define QM_SQC_VFT_VALID	BIT64(44)
43 #define QM_SQC_VFT_SQ_NUM_SHIFT 45
44 #define QM_CQC_VFT_VALID	BIT(28)
45 #define QM_VFT_WRITE		0
46 #define QM_VFT_READ		1
47 #define QM_SQC_VFT_BASE_MASK	0x3ff
48 #define QM_SQC_VFT_NUM_MASK	0x3ff
49 /* QM INIT */
50 #define QM_MEM_START_INIT	0x100040
51 #define QM_MEM_INIT_DONE	0x100044
52 #define QM_VF_AEQ_INT_MASK	0x4
53 #define QM_VF_AEQ_INT_MASK_EN	0x1
54 #define QM_VF_EQ_INT_MASK	0xc
55 #define QM_VF_EQ_INT_MASK_EN	0x1
56 #define QM_ARUSER_M_CFG_1	0x100088
57 #define QM_ARUSER_M_CFG_ENABLE	0x100090
58 #define QM_AWUSER_M_CFG_1	0x100098
59 #define QM_AWUSER_M_CFG_ENABLE	0x1000a0
60 #define QM_AXUSER_CFG		0x40001070
61 #define AXUSER_M_CFG_ENABLE	0x7ffffc
62 #define QM_AXI_M_CFG		0x1000ac
63 #define AXI_M_CFG		0xffff
64 #define QM_PEH_AXUSER_CFG	0x1000cc
65 #define PEH_AXUSER_CFG		0x400801
66 #define QM_CACHE_CTL		0x100050
67 #define QM_CACHE_CFG		0x4893
68 #define QM_CACHE_WB_START	0x204
69 #define QM_CACHE_WB_DONE	0x208
70 /* XQC shift */
71 #define QM_SQ_SQE_SIZE_SHIFT	12
72 #define QM_SQ_ORDER_SHIFT	4
73 #define QM_SQ_TYPE_SHIFT	8
74 #define QM_CQE_SIZE		4
75 #define QM_CQ_CQE_SIZE_SHIFT	12
76 /* CQE */
77 #define QM_CQE_PHASE(cqe) (((cqe)->w7) & QM_FVT_CFG_RDY_BIT)
78 
/* Mailbox commands common to all QM hardware versions */
enum qm_mailbox_common_cmd {
	QM_MB_CMD_SQC = 0x0,	/* write/read an SQ context */
	QM_MB_CMD_CQC,		/* write/read a CQ context */
	QM_MB_CMD_EQC,		/* event queue context */
	QM_MB_CMD_AEQC,		/* abnormal event queue context */
	QM_MB_CMD_SQC_BT,	/* set SQC block table base */
	QM_MB_CMD_CQC_BT,	/* set CQC block table base */
	QM_MB_CMD_SQC_VFT,	/* read the SQC virtual function table */
};
88 
/* Additional mailbox commands available from QM hardware v3 on */
enum qm_mailbox_cmd_v3 {
	/*
	 * NOTE(review): name looks like a typo for QM_MB_CMD_CLOSE_QM —
	 * confirm no external user relies on it before renaming.
	 */
	QM_MB_CM_CLOSE_QM = 0x7,
	QM_MB_CMD_CLOSE_QP,
	QM_MB_CMD_FLUSH_QM,
	QM_MB_CMD_FLUSH_QP,
	QM_MB_CMD_SRC = 0xc,
	QM_MB_CMD_DST,
	QM_MB_CMD_STOP_QM,
};
98 
/*
 * In-memory image of the 128-bit QM mailbox register frame. The union
 * lets the frame be moved as two 64-bit halves (x[0]/x[1]), which the
 * hardware requires to be accessed as one 128-bit transfer.
 */
struct qm_mailbox {
	union {
		struct {
			uint16_t w0;		/* cmd, op, busy and status bits */
			uint16_t queue;		/* target queue number */
			uint32_t base_l;	/* XQC DMA address, low 32 bits */
			uint32_t base_h;	/* XQC DMA address, high 32 bits */
			uint32_t token;
		};
		uint64_t x[2];
	};
};
111 
/* One entry of the debug/diagnostic (DFX) register dump table */
struct qm_dfx_registers {
	const char *reg_name;	/* human-readable name printed in the dump */
	uint32_t reg_offset;	/* register offset from qm->io_base */
};
116 
/* DFX counters dumped on error; the empty entry terminates the table */
static const struct qm_dfx_registers qm_dfx_regs[] = {
	{ .reg_name = "QM_ECC_1BIT_CNT           ", .reg_offset = 0x104000 },
	{ .reg_name = "QM_ECC_MBIT_CNT           ", .reg_offset = 0x104008 },
	{ .reg_name = "QM_DFX_MB_CNT             ", .reg_offset = 0x104018 },
	{ .reg_name = "QM_DFX_DB_CNT             ", .reg_offset = 0x104028 },
	{ .reg_name = "QM_DFX_SQE_CNT            ", .reg_offset = 0x104038 },
	{ .reg_name = "QM_DFX_CQE_CNT            ", .reg_offset = 0x104048 },
	{ .reg_name = "QM_DFX_SEND_SQE_TO_ACC_CNT", .reg_offset = 0x104050 },
	{ .reg_name = "QM_DFX_WB_SQE_FROM_ACC_CNT", .reg_offset = 0x104058 },
	{ .reg_name = "QM_DFX_ACC_FINISH_CNT     ", .reg_offset = 0x104060 },
	{ .reg_name = "QM_DFX_CQE_ERR_CNT        ", .reg_offset = 0x1040b4 },
	{ }
};
130 
131 void hisi_qm_get_version(struct hisi_qm *qm)
132 {
133 	qm->version = io_read32(qm->io_base + HISI_QM_REVISON_ID_BASE) &
134 		      HISI_QM_REVISON_ID_MASK;
135 }
136 
137 static void qm_db(struct hisi_qm *qm, uint16_t qn, uint8_t cmd, uint16_t index,
138 		  uint8_t priority)
139 {
140 	uint64_t doorbell = 0;
141 
142 	doorbell = qn | SHIFT_U64(cmd, QM_DB_CMD_SHIFT) |
143 		   SHIFT_U64(QM_DB_RAND_DATA, QM_DB_RAND_DATA_SHIFT) |
144 		   SHIFT_U64(index, QM_DB_INDEX_SHIFT) |
145 		   SHIFT_U64(priority, QM_DB_PRIORITY_SHIFT);
146 
147 	io_write64(qm->io_base + QM_DOORBELL_SQ_CQ_BASE, doorbell);
148 }
149 
150 static void qm_mb_write(struct hisi_qm *qm, struct qm_mailbox *mb)
151 {
152 	vaddr_t dst = qm->io_base + QM_MAILBOX_BASE;
153 
154 	write_64bit_pair(dst, mb->x[1], mb->x[0]);
155 	dsb_osh();
156 }
157 
158 static void qm_mb_read(struct hisi_qm *qm, struct qm_mailbox *mb)
159 {
160 	vaddr_t mb_base = qm->io_base + QM_MAILBOX_BASE;
161 
162 	read_64bit_pair(mb_base, mb->x + 1, mb->x);
163 	dsb_osh();
164 }
165 
166 static enum hisi_drv_status qm_wait_mb_ready(struct hisi_qm *qm)
167 {
168 	struct qm_mailbox mb = { };
169 	uint32_t timeout = 0;
170 
171 	timeout = timeout_init_us(QM_MB_WAIT_PERIOD * QM_MB_WAIT_READY_CNT);
172 	while (!timeout_elapsed(timeout)) {
173 		/* 128 bits should be read from hardware at one time*/
174 		qm_mb_read(qm, &mb);
175 		if (!(mb.w0 & QM_MB_BUSY_BIT))
176 			return HISI_QM_DRVCRYPT_NO_ERR;
177 	}
178 
179 	EMSG("QM mailbox is busy to start!");
180 
181 	return HISI_QM_DRVCRYPT_EBUSY;
182 }
183 
184 static enum hisi_drv_status qm_wait_mb_finish(struct hisi_qm *qm,
185 					      struct qm_mailbox *mb)
186 {
187 	uint32_t timeout = 0;
188 
189 	timeout = timeout_init_us(QM_MB_WAIT_PERIOD * QM_MB_WAIT_MAX_CNT);
190 	while (!timeout_elapsed(timeout)) {
191 		qm_mb_read(qm, mb);
192 		if (!(mb->w0 & QM_MB_BUSY_BIT)) {
193 			if (mb->w0 & QM_MB_STATUS_MASK) {
194 				EMSG("QM mailbox operation failed!");
195 				return HISI_QM_DRVCRYPT_EIO;
196 			} else {
197 				return HISI_QM_DRVCRYPT_NO_ERR;
198 			}
199 		}
200 	}
201 
202 	return HISI_QM_DRVCRYPT_ETMOUT;
203 }
204 
205 static void qm_mb_init(struct qm_mailbox *mb, uint8_t cmd, uint64_t base,
206 		       uint16_t qnum, uint8_t op)
207 {
208 	mb->w0 = cmd | SHIFT_U32(op, QM_MB_OP_SHIFT) |  QM_MB_BUSY_BIT;
209 	mb->queue = qnum;
210 	reg_pair_from_64(base, &mb->base_h, &mb->base_l);
211 	mb->token = 0;
212 }
213 
214 static enum hisi_drv_status qm_mb_nolock(struct hisi_qm *qm,
215 					 struct qm_mailbox *mb)
216 {
217 	if (qm_wait_mb_ready(qm))
218 		return HISI_QM_DRVCRYPT_EBUSY;
219 
220 	qm_mb_write(qm, mb);
221 
222 	return qm_wait_mb_finish(qm, mb);
223 }
224 
225 static enum hisi_drv_status hisi_qm_mb_write(struct hisi_qm *qm, uint8_t cmd,
226 					     uintptr_t dma_addr, uint16_t qnum)
227 {
228 	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;
229 	struct qm_mailbox mb = { };
230 
231 	qm_mb_init(&mb, cmd, dma_addr, qnum, QM_MB_OP_WR);
232 	mutex_lock(&qm->mailbox_lock);
233 	ret = qm_mb_nolock(qm, &mb);
234 	mutex_unlock(&qm->mailbox_lock);
235 
236 	return ret;
237 }
238 
239 static enum hisi_drv_status hisi_qm_mb_read(struct hisi_qm *qm, uint64_t *base,
240 					    uint8_t cmd, uint16_t qnum)
241 {
242 	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;
243 	struct qm_mailbox mb = { };
244 
245 	qm_mb_init(&mb, cmd, 0, qnum, QM_MB_OP_RD);
246 	mutex_lock(&qm->mailbox_lock);
247 	ret = qm_mb_nolock(qm, &mb);
248 	mutex_unlock(&qm->mailbox_lock);
249 	if (ret)
250 		return ret;
251 
252 	reg_pair_from_64(*base, &mb.base_h, &mb.base_l);
253 
254 	return HISI_QM_DRVCRYPT_NO_ERR;
255 }
256 
257 static void qm_cfg_vft_data(struct hisi_qm *qm, uint8_t vft_type,
258 			    uint32_t base, uint32_t number)
259 {
260 	uint32_t data_h = 0;
261 	uint32_t data_l = 0;
262 	uint64_t data = 0;
263 
264 	switch (vft_type) {
265 	case QM_SQC_VFT:
266 		data = SHIFT_U64(base, QM_SQC_VFT_START_SQN_SHIFT) |
267 			QM_SQC_VFT_VALID |
268 			SHIFT_U64((number - 1), QM_SQC_VFT_SQ_NUM_SHIFT);
269 		break;
270 	case QM_CQC_VFT:
271 		data = QM_CQC_VFT_VALID;
272 		break;
273 	default:
274 		panic("Invalid vft type");
275 	}
276 
277 	reg_pair_from_64(data, &data_h, &data_l);
278 	io_write32(qm->io_base + QM_VFT_CFG_DATA_L, data_l);
279 	io_write32(qm->io_base + QM_VFT_CFG_DATA_H, data_h);
280 }
281 
/*
 * Program one virtual function table (VFT) entry through the indirect
 * QM_VFT_CFG_* register interface.
 *
 * @qm: queue management device
 * @vft_type: QM_SQC_VFT or QM_CQC_VFT
 * @function: PF (0) or VF number the entry applies to
 * @base: first queue granted to the function (used for SQC VFT only)
 * @num: queue count granted to the function (used for SQC VFT only)
 *
 * Returns HISI_QM_DRVCRYPT_NO_ERR, or HISI_QM_DRVCRYPT_EBUSY when the
 * config interface does not become ready in time. Register order below
 * follows the hardware's required programming sequence.
 */
static enum hisi_drv_status qm_set_vft_common(struct hisi_qm *qm,
					      uint8_t vft_type,
					      uint32_t function,
					      uint32_t base,
					      uint32_t num)
{
	uint32_t val = 0;

	/* Wait until the indirect config interface is idle */
	if (IO_READ32_POLL_TIMEOUT(qm->io_base + QM_VFT_CFG_RDY, val,
				   val & QM_FVT_CFG_RDY_BIT, POLL_PERIOD,
				   POLL_TIMEOUT)) {
		EMSG("QM VFT is not ready");
		return HISI_QM_DRVCRYPT_EBUSY;
	}

	/* Select operation, table, target function, then the payload */
	io_write32(qm->io_base + QM_VFT_CFG_OP_WR, QM_VFT_WRITE);
	io_write32(qm->io_base + QM_VFT_CFG_TYPE, vft_type);
	io_write32(qm->io_base + QM_VFT_CFG_ADDRESS, function);
	qm_cfg_vft_data(qm, vft_type, base, num);
	/* Clear ready and kick the operation, then wait for completion */
	io_write32(qm->io_base + QM_VFT_CFG_RDY, 0x0);
	io_write32(qm->io_base + QM_VFT_CFG_OP_ENABLE, QM_FVT_CFG_RDY_BIT);

	if (IO_READ32_POLL_TIMEOUT(qm->io_base + QM_VFT_CFG_RDY, val,
				   val & QM_FVT_CFG_RDY_BIT, POLL_PERIOD,
				   POLL_TIMEOUT)) {
		EMSG("QM VFT is not ready");
		return HISI_QM_DRVCRYPT_EBUSY;
	}

	return HISI_QM_DRVCRYPT_NO_ERR;
}
313 
314 static enum hisi_drv_status qm_set_xqc_vft(struct hisi_qm *qm,
315 					   uint32_t function,
316 					   uint32_t base, uint32_t num)
317 {
318 	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;
319 	int i = 0;
320 
321 	if (!num) {
322 		EMSG("Invalid sq num");
323 		return HISI_QM_DRVCRYPT_EINVAL;
324 	}
325 
326 	for (i = QM_SQC_VFT; i <= QM_CQC_VFT; i++) {
327 		ret = qm_set_vft_common(qm, i, function, base, num);
328 		if (ret) {
329 			EMSG("QM set type %d fail", i);
330 			return ret;
331 		}
332 	}
333 
334 	return HISI_QM_DRVCRYPT_NO_ERR;
335 }
336 
337 static enum hisi_drv_status qm_get_vft(struct hisi_qm *qm, uint32_t *base,
338 				       uint32_t *num)
339 {
340 	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;
341 	uint64_t sqc_vft = 0;
342 
343 	ret = hisi_qm_mb_read(qm, &sqc_vft, QM_MB_CMD_SQC_VFT, 0);
344 	if (ret)
345 		return ret;
346 
347 	*base = (sqc_vft >> QM_SQC_VFT_START_SQN_SHIFT) & QM_SQC_VFT_BASE_MASK;
348 	*num = ((sqc_vft >> QM_SQC_VFT_SQ_NUM_SHIFT) & QM_SQC_VFT_NUM_MASK) + 1;
349 
350 	return HISI_QM_DRVCRYPT_NO_ERR;
351 }
352 
353 static void qp_free(struct hisi_qm *qm, uint32_t id)
354 {
355 	struct hisi_qp *qp = &qm->qp_array[id];
356 
357 	free(qp->sqe);
358 	free(qp->cqe);
359 }
360 
361 static enum hisi_drv_status qp_alloc(struct hisi_qm *qm, uint32_t id)
362 {
363 	size_t sq_size = qm->sqe_size * HISI_QM_Q_DEPTH;
364 	size_t cq_size = sizeof(struct qm_cqe) * HISI_QM_Q_DEPTH;
365 	struct hisi_qp *qp = &qm->qp_array[id];
366 	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;
367 
368 	qp->sqe = memalign(HISI_QM_ALIGN128, sq_size);
369 	if (!qp->sqe) {
370 		EMSG("Fail to malloc sq[%"PRIu32"]", id);
371 		return HISI_QM_DRVCRYPT_ENOMEM;
372 	}
373 	qp->sqe_dma = virt_to_phys(qp->sqe);
374 	qp->cqe = memalign(HISI_QM_ALIGN32, cq_size);
375 	if (!qp->cqe) {
376 		EMSG("Fail to malloc cq[%"PRIu32"]", id);
377 		ret = HISI_QM_DRVCRYPT_ENOMEM;
378 		goto free_sqe;
379 	}
380 	qp->cqe_dma = virt_to_phys(qp->cqe);
381 
382 	qp->qp_id = id;
383 	qp->qm = qm;
384 	return HISI_QM_DRVCRYPT_NO_ERR;
385 
386 free_sqe:
387 	free(qp->sqe);
388 	return ret;
389 }
390 
391 static void qm_free(struct hisi_qm *qm)
392 {
393 	unsigned int i = 0;
394 
395 	for (i = 0; i < qm->qp_num; i++)
396 		qp_free(qm, i);
397 
398 	free(qm->qp_array);
399 	free(qm->sqc);
400 	free(qm->cqc);
401 }
402 
403 static enum hisi_drv_status qm_alloc(struct hisi_qm *qm)
404 {
405 	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;
406 	size_t sqc_size = 0;
407 	size_t cqc_size = 0;
408 	unsigned int i = 0;
409 	int j = 0;
410 
411 	sqc_size = sizeof(struct qm_sqc) * qm->qp_num;
412 	cqc_size = sizeof(struct qm_cqc) * qm->qp_num;
413 
414 	qm->sqc = memalign(HISI_QM_ALIGN32, sqc_size);
415 	if (!qm->sqc) {
416 		EMSG("Fail to malloc sqc");
417 		return HISI_QM_DRVCRYPT_ENOMEM;
418 	}
419 	memset(qm->sqc, 0, sqc_size);
420 	qm->sqc_dma = virt_to_phys(qm->sqc);
421 
422 	qm->cqc = memalign(HISI_QM_ALIGN32, cqc_size);
423 	if (!qm->cqc) {
424 		EMSG("Fail to malloc cqc");
425 		ret = HISI_QM_DRVCRYPT_ENOMEM;
426 		goto free_sqc;
427 	}
428 	memset(qm->cqc, 0, cqc_size);
429 	qm->cqc_dma = virt_to_phys(qm->cqc);
430 
431 	qm->qp_array = calloc(qm->qp_num, sizeof(struct hisi_qp));
432 	if (!qm->qp_array) {
433 		EMSG("Fail to malloc qp_array");
434 		ret = HISI_QM_DRVCRYPT_ENOMEM;
435 		goto free_cqc;
436 	}
437 
438 	for (i = 0; i < qm->qp_num; i++) {
439 		ret = qp_alloc(qm, i);
440 		if (ret)
441 			goto free_qp_mem;
442 	}
443 
444 	return HISI_QM_DRVCRYPT_NO_ERR;
445 
446 free_qp_mem:
447 	for (j = (int)i - 1; j >= 0; j--)
448 		qp_free(qm, j);
449 	free(qm->qp_array);
450 free_cqc:
451 	free(qm->cqc);
452 free_sqc:
453 	free(qm->sqc);
454 	return ret;
455 }
456 
457 enum hisi_drv_status hisi_qm_init(struct hisi_qm *qm)
458 {
459 	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;
460 
461 	if (qm->fun_type == HISI_QM_HW_VF) {
462 		ret = qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
463 		if (ret) {
464 			EMSG("Fail to get function vft config");
465 			return ret;
466 		}
467 	}
468 
469 	if (!qm->qp_num || !qm->sqe_size) {
470 		EMSG("Invalid QM parameters");
471 		return HISI_QM_DRVCRYPT_EINVAL;
472 	}
473 
474 	ret = qm_alloc(qm);
475 	if (ret)
476 		return ret;
477 
478 	qm->qp_in_used = 0;
479 	qm->qp_idx = 0;
480 	mutex_init(&qm->qp_lock);
481 	mutex_init(&qm->mailbox_lock);
482 
483 	return HISI_QM_DRVCRYPT_NO_ERR;
484 }
485 
486 static void qm_cache_writeback(struct hisi_qm *qm)
487 {
488 	uint32_t val = 0;
489 
490 	io_write32(qm->io_base + QM_CACHE_WB_START, QM_FVT_CFG_RDY_BIT);
491 
492 	if (IO_READ32_POLL_TIMEOUT(qm->io_base + QM_CACHE_WB_DONE, val,
493 				   val & QM_FVT_CFG_RDY_BIT, POLL_PERIOD,
494 				   POLL_TIMEOUT))
495 		panic("QM writeback sqc cache fail");
496 }
497 
498 void hisi_qm_uninit(struct hisi_qm *qm)
499 {
500 	qm_cache_writeback(qm);
501 	qm_free(qm);
502 	mutex_destroy(&qm->qp_lock);
503 	mutex_destroy(&qm->mailbox_lock);
504 }
505 
506 static enum hisi_drv_status qm_hw_mem_reset(struct hisi_qm *qm)
507 {
508 	uint32_t val = 0;
509 
510 	io_write32(qm->io_base + QM_MEM_START_INIT, QM_FVT_CFG_RDY_BIT);
511 
512 	if (IO_READ32_POLL_TIMEOUT(qm->io_base + QM_MEM_INIT_DONE, val,
513 				   val & QM_FVT_CFG_RDY_BIT, POLL_PERIOD,
514 				   POLL_TIMEOUT))
515 		return HISI_QM_DRVCRYPT_EBUSY;
516 
517 	return HISI_QM_DRVCRYPT_NO_ERR;
518 }
519 
520 static enum hisi_drv_status qm_func_vft_cfg(struct hisi_qm *qm)
521 {
522 	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;
523 	uint32_t q_base = qm->qp_num;
524 	uint32_t act_q_num = 0;
525 	unsigned int i = 0;
526 	unsigned int j = 0;
527 
528 	if (!qm->vfs_num)
529 		return HISI_QM_DRVCRYPT_NO_ERR;
530 
531 	if (qm->vfs_num > HISI_QM_MAX_VFS_NUM) {
532 		EMSG("Invalid QM vfs_num");
533 		return HISI_QM_DRVCRYPT_EINVAL;
534 	}
535 
536 	for (i = 1; i <= qm->vfs_num; i++) {
537 		act_q_num = HISI_QM_VF_Q_NUM;
538 		ret = qm_set_xqc_vft(qm, i, q_base, act_q_num);
539 		if (ret) {
540 			for (j = 1; j < i; j++)
541 				(void)qm_set_xqc_vft(qm, j, 0, 0);
542 			return ret;
543 		}
544 		q_base += act_q_num;
545 	}
546 
547 	return HISI_QM_DRVCRYPT_NO_ERR;
548 }
549 
550 enum hisi_drv_status hisi_qm_start(struct hisi_qm *qm)
551 {
552 	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;
553 
554 	if (qm->fun_type == HISI_QM_HW_PF) {
555 		ret = qm_hw_mem_reset(qm);
556 		if (ret) {
557 			EMSG("Fail to reset QM hardware mem");
558 			return ret;
559 		}
560 
561 		ret = qm_set_xqc_vft(qm, 0, qm->qp_base, qm->qp_num);
562 		if (ret) {
563 			EMSG("Fail to set PF xqc_vft");
564 			return ret;
565 		}
566 
567 		ret = qm_func_vft_cfg(qm);
568 		if (ret) {
569 			EMSG("Fail to set VF xqc_vft");
570 			return ret;
571 		}
572 	}
573 
574 	ret = hisi_qm_mb_write(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0);
575 	if (ret) {
576 		EMSG("Fail to set sqc_bt");
577 		return ret;
578 	}
579 
580 	ret = hisi_qm_mb_write(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0);
581 	if (ret) {
582 		EMSG("Fail to set cqc_bt");
583 		return ret;
584 	}
585 
586 	/* Security mode does not support msi */
587 	io_write32(qm->io_base + QM_VF_AEQ_INT_MASK, QM_VF_AEQ_INT_MASK_EN);
588 	io_write32(qm->io_base + QM_VF_EQ_INT_MASK, QM_VF_EQ_INT_MASK_EN);
589 
590 	return HISI_QM_DRVCRYPT_NO_ERR;
591 }
592 
/*
 * One-time PF device setup: configure the AXI user fields, cache
 * behavior and RAS masking. VFs have no access to these registers and
 * return immediately.
 */
void hisi_qm_dev_init(struct hisi_qm *qm)
{
	if (qm->fun_type == HISI_QM_HW_VF)
		return;

	/* QM user domain: AXI read/write user field configuration */
	io_write32(qm->io_base + QM_ARUSER_M_CFG_1, QM_AXUSER_CFG);
	io_write32(qm->io_base + QM_ARUSER_M_CFG_ENABLE, AXUSER_M_CFG_ENABLE);
	io_write32(qm->io_base + QM_AWUSER_M_CFG_1, QM_AXUSER_CFG);
	io_write32(qm->io_base + QM_AWUSER_M_CFG_ENABLE, AXUSER_M_CFG_ENABLE);
	/* QM cache */
	io_write32(qm->io_base + QM_AXI_M_CFG, AXI_M_CFG);

	if (qm->version == HISI_QM_HW_V2) {
		/* Disable FLR triggered by BME(bus master enable) */
		io_write32(qm->io_base + QM_PEH_AXUSER_CFG, PEH_AXUSER_CFG);
		/* Set sec sqc and cqc cache wb threshold 4 */
		io_write32(qm->io_base + QM_CACHE_CTL, QM_CACHE_CFG);
	}
	/* Disable QM ras */
	io_write32(qm->io_base + HISI_QM_ABNML_INT_MASK,
		   HISI_QM_ABNML_INT_MASK_CFG);
}
616 
617 static enum hisi_drv_status qm_sqc_cfg(struct hisi_qp *qp)
618 {
619 	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;
620 	struct hisi_qm *qm = qp->qm;
621 	struct qm_sqc *sqc = NULL;
622 	paddr_t sqc_dma = 0;
623 
624 	sqc = memalign(HISI_QM_ALIGN32, sizeof(struct qm_sqc));
625 	if (!sqc)
626 		return HISI_QM_DRVCRYPT_ENOMEM;
627 
628 	sqc_dma = virt_to_phys(sqc);
629 
630 	memset(sqc, 0, sizeof(struct qm_sqc));
631 	reg_pair_from_64(qp->sqe_dma, &sqc->base_h, &sqc->base_l);
632 	sqc->dw3 = (HISI_QM_Q_DEPTH - 1) |
633 		    SHIFT_U32(qm->sqe_log2_size, QM_SQ_SQE_SIZE_SHIFT);
634 	sqc->rand_data = QM_DB_RAND_DATA;
635 	sqc->cq_num = qp->qp_id;
636 	sqc->w13 = BIT32(QM_SQ_ORDER_SHIFT) |
637 		   SHIFT_U32(qp->sq_type, QM_SQ_TYPE_SHIFT);
638 
639 	ret = hisi_qm_mb_write(qm, QM_MB_CMD_SQC, sqc_dma, qp->qp_id);
640 	free(sqc);
641 
642 	return ret;
643 }
644 
645 static enum hisi_drv_status qm_cqc_cfg(struct hisi_qp *qp)
646 {
647 	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;
648 	struct hisi_qm *qm = qp->qm;
649 	struct qm_cqc *cqc = NULL;
650 	paddr_t cqc_dma = 0;
651 
652 	cqc = memalign(HISI_QM_ALIGN32, sizeof(struct qm_cqc));
653 	if (!cqc)
654 		return HISI_QM_DRVCRYPT_ENOMEM;
655 
656 	cqc_dma = virt_to_phys(cqc);
657 
658 	memset(cqc, 0, sizeof(struct qm_cqc));
659 	reg_pair_from_64(qp->cqe_dma, &cqc->base_h, &cqc->base_l);
660 	cqc->dw3 = (HISI_QM_Q_DEPTH - 1) |
661 		    SHIFT_U32(QM_CQE_SIZE, QM_CQ_CQE_SIZE_SHIFT);
662 	cqc->rand_data = QM_DB_RAND_DATA;
663 	cqc->dw6 = PHASE_DEFAULT_VAL;
664 
665 	ret = hisi_qm_mb_write(qm, QM_MB_CMD_CQC, cqc_dma, qp->qp_id);
666 	free(cqc);
667 
668 	return ret;
669 }
670 
671 struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, uint8_t sq_type)
672 {
673 	struct hisi_qp *qp = NULL;
674 	int cur_idx = 0;
675 	uint32_t i = 0;
676 
677 	mutex_lock(&qm->qp_lock);
678 	if (qm->qp_in_used == qm->qp_num) {
679 		EMSG("All %"PRIu32" queues of QM are busy", qm->qp_num);
680 		goto err_proc;
681 	}
682 
683 	for (i = 0; i < qm->qp_num; i++) {
684 		cur_idx = (qm->qp_idx + i) % qm->qp_num;
685 		if (!qm->qp_array[cur_idx].used) {
686 			qm->qp_array[cur_idx].used = true;
687 			qm->qp_idx = cur_idx + 1;
688 			break;
689 		}
690 	}
691 
692 	qp = qm->qp_array + cur_idx;
693 	memset(qp->cqe, 0, sizeof(struct qm_cqe) * HISI_QM_Q_DEPTH);
694 	qp->sq_type = sq_type;
695 	qp->sq_tail = 0;
696 	qp->cq_head = 0;
697 	qp->cqc_phase = true;
698 
699 	if (qm_sqc_cfg(qp)) {
700 		EMSG("Fail to set qp[%"PRIu32"] sqc", qp->qp_id);
701 		goto err_proc;
702 	}
703 
704 	if (qm_cqc_cfg(qp)) {
705 		EMSG("Fail to set qp[%"PRIu32"] cqc", qp->qp_id);
706 		goto err_proc;
707 	}
708 
709 	qm->qp_in_used++;
710 	mutex_unlock(&qm->qp_lock);
711 	return qp;
712 
713 err_proc:
714 	qp->sq_type = 0;
715 	qp->cqc_phase = false;
716 	mutex_unlock(&qm->qp_lock);
717 	return NULL;
718 }
719 
720 void hisi_qm_release_qp(struct hisi_qp *qp)
721 {
722 	struct hisi_qm *qm = NULL;
723 
724 	if (!qp) {
725 		EMSG("QP is NULL");
726 		return;
727 	}
728 
729 	qm = qp->qm;
730 	mutex_lock(&qm->qp_lock);
731 	qm->qp_in_used--;
732 	qp->used = false;
733 	mutex_unlock(&qm->qp_lock);
734 }
735 
736 static void qm_sq_tail_update(struct hisi_qp *qp)
737 {
738 	if (qp->sq_tail == HISI_QM_Q_DEPTH - 1)
739 		qp->sq_tail = 0;
740 	else
741 		qp->sq_tail++;
742 }
743 
744 /*
745  * One task thread will just bind to one hardware queue, and
746  * hardware does not support msi. So we have no lock here.
747  */
748 enum hisi_drv_status hisi_qp_send(struct hisi_qp *qp, void *msg)
749 {
750 	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;
751 	struct hisi_qm *qm = NULL;
752 	void *sqe = NULL;
753 
754 	if (!qp) {
755 		EMSG("QP is NULL");
756 		return HISI_QM_DRVCRYPT_EINVAL;
757 	}
758 
759 	qm = qp->qm;
760 	ret = qm->dev_status_check(qm);
761 	if (ret)
762 		return ret;
763 
764 	sqe = (void *)((vaddr_t)qp->sqe + qm->sqe_size * qp->sq_tail);
765 	memset(sqe, 0, qm->sqe_size);
766 
767 	ret = qp->fill_sqe(sqe, msg);
768 	if (ret) {
769 		EMSG("Fail to fill sqe");
770 		return ret;
771 	}
772 
773 	qm_sq_tail_update(qp);
774 
775 	dsb();
776 	qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_SQ, qp->sq_tail, 0);
777 
778 	return HISI_QM_DRVCRYPT_NO_ERR;
779 }
780 
781 static void qm_cq_head_update(struct hisi_qp *qp)
782 {
783 	if (qp->cq_head == HISI_QM_Q_DEPTH - 1) {
784 		qp->cqc_phase = !qp->cqc_phase;
785 		qp->cq_head = 0;
786 	} else {
787 		qp->cq_head++;
788 	}
789 }
790 
/*
 * Poll the completion queue once. Returns HISI_QM_DRVCRYPT_RECV_DONE
 * when one completion was consumed and parsed into @msg,
 * HISI_QM_DRVCRYPT_NO_ERR when nothing is pending yet, or an error
 * code from the device check or the parse callback.
 */
static enum hisi_drv_status hisi_qp_recv(struct hisi_qp *qp, void *msg)
{
	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;
	struct hisi_qm *qm = qp->qm;
	struct qm_cqe *cqe = NULL;
	void *sqe = NULL;

	ret = qm->dev_status_check(qm);
	if (ret)
		return ret;

	cqe = qp->cqe + qp->cq_head;
	/* A cqe is new when its phase bit matches the expected phase */
	if (QM_CQE_PHASE(cqe) == qp->cqc_phase) {
		/* Order the phase check before reading the cqe payload */
		dsb_osh();
		sqe = (void *)((vaddr_t)qp->sqe + qm->sqe_size * cqe->sq_head);
		ret = qp->parse_sqe(sqe, msg);
		/* Advance and ring the CQ doorbell even on parse failure */
		qm_cq_head_update(qp);
		qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, qp->cq_head, 0);
		if (ret) {
			EMSG("Fail to parse sqe");
			return ret;
		}
	} else {
		return HISI_QM_DRVCRYPT_NO_ERR;
	}

	return HISI_QM_DRVCRYPT_RECV_DONE;
}
819 
820 static void qm_dfx_dump(struct hisi_qm *qm)
821 {
822 	const struct qm_dfx_registers *regs = qm_dfx_regs;
823 	__maybe_unused uint32_t val = 0;
824 
825 	if (qm->fun_type == HISI_QM_HW_VF)
826 		return;
827 
828 	while (regs->reg_name) {
829 		val = io_read32(qm->io_base + regs->reg_offset);
830 		EMSG("%s= 0x%" PRIx32, regs->reg_name, val);
831 		regs++;
832 	}
833 }
834 
835 enum hisi_drv_status hisi_qp_recv_sync(struct hisi_qp *qp, void *msg)
836 {
837 	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;
838 	uint32_t timeout = 0;
839 
840 	if (!qp) {
841 		EMSG("QP is NULL");
842 		return HISI_QM_DRVCRYPT_EINVAL;
843 	}
844 
845 	timeout = timeout_init_us(QM_SINGLE_WAIT_TIME *
846 				  HISI_QM_RECV_SYNC_TIMEOUT);
847 	while (!timeout_elapsed(timeout)) {
848 		ret = hisi_qp_recv(qp, msg);
849 		if (ret) {
850 			if (ret != HISI_QM_DRVCRYPT_RECV_DONE) {
851 				EMSG("QM recv task error");
852 				qm_dfx_dump(qp->qm);
853 				return ret;
854 			} else {
855 				return HISI_QM_DRVCRYPT_NO_ERR;
856 			}
857 		}
858 	}
859 
860 	EMSG("QM recv task timeout");
861 	qm_dfx_dump(qp->qm);
862 	return HISI_QM_DRVCRYPT_ETMOUT;
863 }
864