xref: /optee_os/core/drivers/crypto/hisilicon/hisi_qm.c (revision c83a542f37348045087f915a5bd76e34b9821c96)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright 2022-2023 HiSilicon Limited.
4  * Kunpeng hardware accelerator queue management module.
5  */
6 #include "hisi_qm.h"
7 
/* Generic "ready/done" bit polled in several QM status registers */
#define QM_FVT_CFG_RDY_BIT	0x1
/* Doorbell: 64-bit write composed of qn | cmd | random token | index | prio */
#define QM_DOORBELL_SQ_CQ_BASE	0x1000
#define QM_DB_CMD_SHIFT		12
#define QM_DB_RAND_DATA_SHIFT	16
#define QM_DB_INDEX_SHIFT	32
#define QM_DB_PRIORITY_SHIFT	48
#define QM_DB_RAND_DATA		0x5a
#define QM_DOORBELL_CMD_SQ	0
#define QM_DOORBELL_CMD_CQ	1
/* Mailbox: 128-bit command block written to QM_MAILBOX_BASE */
#define QM_MAILBOX_BASE		0x300
#define QM_MAILBOX_DATA_ADDR_L	0x304
#define QM_MAILBOX_DATA_ADDR_H	0x308
#define QM_MB_BUSY_SHIFT	13
#define QM_MB_BUSY_BIT		BIT32(QM_MB_BUSY_SHIFT)
#define QM_MB_OP_SHIFT		14
#define QM_MB_OP_WR		0
#define QM_MB_OP_RD		1
#define QM_MB_STATUS_MASK	GENMASK_32(12, 9)
#define QM_MB_WAIT_READY_CNT	10
#define QM_MB_WAIT_MAX_CNT	21000
#define QM_MB_WAIT_PERIOD	200
/* XQC_VFT: per-function SQC/CQC virtual function table programming */
#define QM_VFT_CFG_OP_ENABLE	0x100054
#define QM_VFT_CFG_OP_WR	0x100058
#define QM_VFT_CFG_TYPE		0x10005c
#define QM_VFT_CFG_ADDRESS	0x100060
#define QM_VFT_CFG_DATA_L	0x100064
#define QM_VFT_CFG_DATA_H	0x100068
#define QM_VFT_CFG_RDY		0x10006c
#define QM_SQC_VFT		0
#define QM_CQC_VFT		1
#define QM_SQC_VFT_START_SQN_SHIFT 28
#define QM_SQC_VFT_VALID	BIT64(44)
#define QM_SQC_VFT_SQ_NUM_SHIFT 45
#define QM_CQC_VFT_VALID	BIT(28)
#define QM_VFT_WRITE		0
#define QM_VFT_READ		1
#define QM_SQC_VFT_BASE_MASK	0x3ff
#define QM_SQC_VFT_NUM_MASK	0x3ff
/* QM INIT: device-level initialization and AXI/user-domain configuration */
#define QM_MEM_START_INIT	0x100040
#define QM_MEM_INIT_DONE	0x100044
#define QM_VF_AEQ_INT_MASK	0x4
#define QM_VF_AEQ_INT_MASK_EN	0x1
#define QM_VF_EQ_INT_MASK	0xc
#define QM_VF_EQ_INT_MASK_EN	0x1
#define QM_ARUSER_M_CFG_1	0x100088
#define QM_ARUSER_M_CFG_ENABLE	0x100090
#define QM_AWUSER_M_CFG_1	0x100098
#define QM_AWUSER_M_CFG_ENABLE	0x1000a0
#define QM_AXUSER_CFG		0x40001070
#define AXUSER_M_CFG_ENABLE	0x7ffffc
#define QM_AXI_M_CFG		0x1000ac
#define AXI_M_CFG		0xffff
#define QM_PEH_AXUSER_CFG	0x1000cc
#define PEH_AXUSER_CFG		0x400801
#define QM_CACHE_CTL		0x100050
#define QM_CACHE_CFG		0x4893
#define QM_CACHE_WB_START	0x204
#define QM_CACHE_WB_DONE	0x208
#define QM_PM_CTRL0		0x100148
#define QM_IDLE_DISABLE		BIT(9)
#define QM_DB_TIMEOUT_CFG	0x100074
#define QM_DB_TIMEOUT_SET	0x1fffff
/* XQC shift: field positions inside the SQC/CQC context words */
#define QM_SQ_SQE_SIZE_SHIFT	12
#define QM_SQ_ORDER_SHIFT	4
#define QM_SQ_TYPE_SHIFT	8
#define QM_CQE_SIZE		4
#define QM_CQ_CQE_SIZE_SHIFT	12
/* CQE: phase bit (bit 0 of w7) toggles each time the CQ ring wraps */
#define QM_CQE_PHASE(cqe) (((cqe)->w7) & QM_FVT_CFG_RDY_BIT)
82 
/* Mailbox commands common to all QM hardware versions */
enum qm_mailbox_common_cmd {
	QM_MB_CMD_SQC = 0x0,	/* write/read one SQ context */
	QM_MB_CMD_CQC,		/* write/read one CQ context */
	QM_MB_CMD_EQC,		/* event queue context */
	QM_MB_CMD_AEQC,		/* abnormal event queue context */
	QM_MB_CMD_SQC_BT,	/* set SQC base table address */
	QM_MB_CMD_CQC_BT,	/* set CQC base table address */
	QM_MB_CMD_SQC_VFT,	/* read the SQC virtual function table */
};
92 
/* Additional mailbox commands introduced with QM hardware v3 */
enum qm_mailbox_cmd_v3 {
	/* NOTE(review): "CM" (vs "CMD") looks like a historical typo in the
	 * identifier; it is part of the public name, so it is kept as-is. */
	QM_MB_CM_CLOSE_QM = 0x7,
	QM_MB_CMD_CLOSE_QP,
	QM_MB_CMD_FLUSH_QM,
	QM_MB_CMD_FLUSH_QP,
	QM_MB_CMD_SRC = 0xc,
	QM_MB_CMD_DST,
	QM_MB_CMD_STOP_QM,
};
102 
/*
 * In-memory image of the 128-bit hardware mailbox.  The union lets the
 * same block be accessed either field-by-field or as the two 64-bit
 * halves (x[0]/x[1]) used for the paired 64-bit register accesses.
 */
struct qm_mailbox {
	union {
		struct {
			uint16_t w0;		/* cmd | op | busy bits */
			uint16_t queue;		/* target queue number */
			uint32_t base_l;	/* DMA address, low 32 bits */
			uint32_t base_h;	/* DMA address, high 32 bits */
			uint32_t token;
		};
		uint64_t x[2];
	};
};
115 
/* Name/offset pair for one debug (DFX) counter register */
struct qm_dfx_registers {
	const char *reg_name;
	uint32_t reg_offset;
};
120 
/* DFX counters dumped on error; the list is terminated by a NULL name */
static const struct qm_dfx_registers qm_dfx_regs[] = {
	{ .reg_name = "QM_ECC_1BIT_CNT           ", .reg_offset = 0x104000 },
	{ .reg_name = "QM_ECC_MBIT_CNT           ", .reg_offset = 0x104008 },
	{ .reg_name = "QM_DFX_MB_CNT             ", .reg_offset = 0x104018 },
	{ .reg_name = "QM_DFX_DB_CNT             ", .reg_offset = 0x104028 },
	{ .reg_name = "QM_DFX_SQE_CNT            ", .reg_offset = 0x104038 },
	{ .reg_name = "QM_DFX_CQE_CNT            ", .reg_offset = 0x104048 },
	{ .reg_name = "QM_DFX_SEND_SQE_TO_ACC_CNT", .reg_offset = 0x104050 },
	{ .reg_name = "QM_DFX_WB_SQE_FROM_ACC_CNT", .reg_offset = 0x104058 },
	{ .reg_name = "QM_DFX_ACC_FINISH_CNT     ", .reg_offset = 0x104060 },
	{ .reg_name = "QM_DFX_CQE_ERR_CNT        ", .reg_offset = 0x1040b4 },
	{ }
};
134 
135 void hisi_qm_get_version(struct hisi_qm *qm)
136 {
137 	qm->version = io_read32(qm->io_base + HISI_QM_REVISON_ID_BASE) &
138 		      HISI_QM_REVISON_ID_MASK;
139 }
140 
141 static void qm_db(struct hisi_qm *qm, uint16_t qn, uint8_t cmd, uint16_t index,
142 		  uint8_t priority)
143 {
144 	uint64_t doorbell = 0;
145 
146 	doorbell = qn | SHIFT_U64(cmd, QM_DB_CMD_SHIFT) |
147 		   SHIFT_U64(QM_DB_RAND_DATA, QM_DB_RAND_DATA_SHIFT) |
148 		   SHIFT_U64(index, QM_DB_INDEX_SHIFT) |
149 		   SHIFT_U64(priority, QM_DB_PRIORITY_SHIFT);
150 
151 	io_write64(qm->io_base + QM_DOORBELL_SQ_CQ_BASE, doorbell);
152 }
153 
154 static void qm_mb_write(struct hisi_qm *qm, struct qm_mailbox *mb)
155 {
156 	vaddr_t dst = qm->io_base + QM_MAILBOX_BASE;
157 
158 	write_64bit_pair(dst, mb->x[1], mb->x[0]);
159 	dsb_osh();
160 }
161 
162 static void qm_mb_read(struct hisi_qm *qm, struct qm_mailbox *mb)
163 {
164 	vaddr_t mb_base = qm->io_base + QM_MAILBOX_BASE;
165 
166 	read_64bit_pair(mb_base, mb->x + 1, mb->x);
167 	dsb_osh();
168 }
169 
170 static enum hisi_drv_status qm_wait_mb_ready(struct hisi_qm *qm)
171 {
172 	struct qm_mailbox mb = { };
173 	uint32_t timeout = 0;
174 
175 	timeout = timeout_init_us(QM_MB_WAIT_PERIOD * QM_MB_WAIT_READY_CNT);
176 	while (!timeout_elapsed(timeout)) {
177 		/* 128 bits should be read from hardware at one time*/
178 		qm_mb_read(qm, &mb);
179 		if (!(mb.w0 & QM_MB_BUSY_BIT))
180 			return HISI_QM_DRVCRYPT_NO_ERR;
181 	}
182 
183 	EMSG("QM mailbox is busy to start!");
184 
185 	return HISI_QM_DRVCRYPT_EBUSY;
186 }
187 
188 static enum hisi_drv_status qm_wait_mb_finish(struct hisi_qm *qm,
189 					      struct qm_mailbox *mb)
190 {
191 	uint32_t timeout = 0;
192 
193 	timeout = timeout_init_us(QM_MB_WAIT_PERIOD * QM_MB_WAIT_MAX_CNT);
194 	while (!timeout_elapsed(timeout)) {
195 		qm_mb_read(qm, mb);
196 		if (!(mb->w0 & QM_MB_BUSY_BIT)) {
197 			if (mb->w0 & QM_MB_STATUS_MASK) {
198 				EMSG("QM mailbox operation failed!");
199 				return HISI_QM_DRVCRYPT_EIO;
200 			} else {
201 				return HISI_QM_DRVCRYPT_NO_ERR;
202 			}
203 		}
204 	}
205 
206 	return HISI_QM_DRVCRYPT_ETMOUT;
207 }
208 
209 static void qm_mb_init(struct qm_mailbox *mb, uint8_t cmd, uint64_t base,
210 		       uint16_t qnum, uint8_t op)
211 {
212 	mb->w0 = cmd | SHIFT_U32(op, QM_MB_OP_SHIFT) |  QM_MB_BUSY_BIT;
213 	mb->queue = qnum;
214 	reg_pair_from_64(base, &mb->base_h, &mb->base_l);
215 	mb->token = 0;
216 }
217 
218 static enum hisi_drv_status qm_mb_nolock(struct hisi_qm *qm,
219 					 struct qm_mailbox *mb)
220 {
221 	if (qm_wait_mb_ready(qm))
222 		return HISI_QM_DRVCRYPT_EBUSY;
223 
224 	qm_mb_write(qm, mb);
225 
226 	return qm_wait_mb_finish(qm, mb);
227 }
228 
229 static enum hisi_drv_status hisi_qm_mb_write(struct hisi_qm *qm, uint8_t cmd,
230 					     uintptr_t dma_addr, uint16_t qnum)
231 {
232 	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;
233 	struct qm_mailbox mb = { };
234 
235 	qm_mb_init(&mb, cmd, dma_addr, qnum, QM_MB_OP_WR);
236 	mutex_lock(&qm->mailbox_lock);
237 	ret = qm_mb_nolock(qm, &mb);
238 	mutex_unlock(&qm->mailbox_lock);
239 
240 	return ret;
241 }
242 
243 static enum hisi_drv_status hisi_qm_mb_read(struct hisi_qm *qm, uint64_t *base,
244 					    uint8_t cmd, uint16_t qnum)
245 {
246 	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;
247 	struct qm_mailbox mb = { };
248 
249 	qm_mb_init(&mb, cmd, 0, qnum, QM_MB_OP_RD);
250 	mutex_lock(&qm->mailbox_lock);
251 	ret = qm_mb_nolock(qm, &mb);
252 	mutex_unlock(&qm->mailbox_lock);
253 	if (ret)
254 		return ret;
255 
256 	reg_pair_from_64(*base, &mb.base_h, &mb.base_l);
257 
258 	return HISI_QM_DRVCRYPT_NO_ERR;
259 }
260 
261 static void qm_cfg_vft_data(struct hisi_qm *qm, uint8_t vft_type,
262 			    uint32_t base, uint32_t number)
263 {
264 	uint32_t data_h = 0;
265 	uint32_t data_l = 0;
266 	uint64_t data = 0;
267 
268 	switch (vft_type) {
269 	case QM_SQC_VFT:
270 		data = SHIFT_U64(base, QM_SQC_VFT_START_SQN_SHIFT) |
271 			QM_SQC_VFT_VALID |
272 			SHIFT_U64((number - 1), QM_SQC_VFT_SQ_NUM_SHIFT);
273 		break;
274 	case QM_CQC_VFT:
275 		data = QM_CQC_VFT_VALID;
276 		break;
277 	default:
278 		panic("Invalid vft type");
279 	}
280 
281 	reg_pair_from_64(data, &data_h, &data_l);
282 	io_write32(qm->io_base + QM_VFT_CFG_DATA_L, data_l);
283 	io_write32(qm->io_base + QM_VFT_CFG_DATA_H, data_h);
284 }
285 
/*
 * Program one VFT entry (SQC or CQC type) for @function.
 *
 * The hardware sequence is: wait until the VFT engine is idle, latch the
 * operation/type/address/data registers, clear the ready flag, trigger
 * the operation, then poll for completion.  The register write order
 * below follows that sequence and must not be rearranged.
 */
static enum hisi_drv_status qm_set_vft_common(struct hisi_qm *qm,
					      uint8_t vft_type,
					      uint32_t function,
					      uint32_t base,
					      uint32_t num)
{
	uint32_t val = 0;

	if (IO_READ32_POLL_TIMEOUT(qm->io_base + QM_VFT_CFG_RDY, val,
				   val & QM_FVT_CFG_RDY_BIT, POLL_PERIOD,
				   POLL_TIMEOUT)) {
		EMSG("QM VFT is not ready");
		return HISI_QM_DRVCRYPT_EBUSY;
	}

	io_write32(qm->io_base + QM_VFT_CFG_OP_WR, QM_VFT_WRITE);
	io_write32(qm->io_base + QM_VFT_CFG_TYPE, vft_type);
	io_write32(qm->io_base + QM_VFT_CFG_ADDRESS, function);
	qm_cfg_vft_data(qm, vft_type, base, num);
	io_write32(qm->io_base + QM_VFT_CFG_RDY, 0x0);
	/* Kick the operation and wait for the engine to signal ready again */
	io_write32(qm->io_base + QM_VFT_CFG_OP_ENABLE, QM_FVT_CFG_RDY_BIT);

	if (IO_READ32_POLL_TIMEOUT(qm->io_base + QM_VFT_CFG_RDY, val,
				   val & QM_FVT_CFG_RDY_BIT, POLL_PERIOD,
				   POLL_TIMEOUT)) {
		EMSG("QM VFT is not ready");
		return HISI_QM_DRVCRYPT_EBUSY;
	}

	return HISI_QM_DRVCRYPT_NO_ERR;
}
317 
318 static enum hisi_drv_status qm_set_xqc_vft(struct hisi_qm *qm,
319 					   uint32_t function,
320 					   uint32_t base, uint32_t num)
321 {
322 	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;
323 	int i = 0;
324 
325 	if (!num) {
326 		EMSG("Invalid sq num");
327 		return HISI_QM_DRVCRYPT_EINVAL;
328 	}
329 
330 	for (i = QM_SQC_VFT; i <= QM_CQC_VFT; i++) {
331 		ret = qm_set_vft_common(qm, i, function, base, num);
332 		if (ret) {
333 			EMSG("QM set type %d fail", i);
334 			return ret;
335 		}
336 	}
337 
338 	return HISI_QM_DRVCRYPT_NO_ERR;
339 }
340 
341 static enum hisi_drv_status qm_get_vft(struct hisi_qm *qm, uint32_t *base,
342 				       uint32_t *num)
343 {
344 	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;
345 	uint64_t sqc_vft = 0;
346 
347 	ret = hisi_qm_mb_read(qm, &sqc_vft, QM_MB_CMD_SQC_VFT, 0);
348 	if (ret)
349 		return ret;
350 
351 	*base = (sqc_vft >> QM_SQC_VFT_START_SQN_SHIFT) & QM_SQC_VFT_BASE_MASK;
352 	*num = ((sqc_vft >> QM_SQC_VFT_SQ_NUM_SHIFT) & QM_SQC_VFT_NUM_MASK) + 1;
353 
354 	return HISI_QM_DRVCRYPT_NO_ERR;
355 }
356 
357 static void qp_free(struct hisi_qm *qm, uint32_t id)
358 {
359 	struct hisi_qp *qp = &qm->qp_array[id];
360 
361 	free(qp->sqe);
362 	free(qp->cqe);
363 }
364 
365 static enum hisi_drv_status qp_alloc(struct hisi_qm *qm, uint32_t id)
366 {
367 	size_t sq_size = qm->sqe_size * HISI_QM_Q_DEPTH;
368 	size_t cq_size = sizeof(struct qm_cqe) * HISI_QM_Q_DEPTH;
369 	struct hisi_qp *qp = &qm->qp_array[id];
370 	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;
371 
372 	qp->sqe = memalign(HISI_QM_ALIGN128, sq_size);
373 	if (!qp->sqe) {
374 		EMSG("Fail to malloc sq[%"PRIu32"]", id);
375 		return HISI_QM_DRVCRYPT_ENOMEM;
376 	}
377 	qp->sqe_dma = virt_to_phys(qp->sqe);
378 	qp->cqe = memalign(HISI_QM_ALIGN32, cq_size);
379 	if (!qp->cqe) {
380 		EMSG("Fail to malloc cq[%"PRIu32"]", id);
381 		ret = HISI_QM_DRVCRYPT_ENOMEM;
382 		goto free_sqe;
383 	}
384 	qp->cqe_dma = virt_to_phys(qp->cqe);
385 
386 	qp->qp_id = id;
387 	qp->qm = qm;
388 	return HISI_QM_DRVCRYPT_NO_ERR;
389 
390 free_sqe:
391 	free(qp->sqe);
392 	return ret;
393 }
394 
/* Release the SQC/CQC tables allocated by hisi_qm_alloc_xqc() */
static void hisi_qm_free_xqc(struct qm_xqc *xqc)
{
	free(xqc->cqc);
	free(xqc->sqc);
}
400 
401 static void qm_free(struct hisi_qm *qm)
402 {
403 	unsigned int i = 0;
404 
405 	for (i = 0; i < qm->qp_num; i++)
406 		qp_free(qm, i);
407 
408 	free(qm->qp_array);
409 	hisi_qm_free_xqc(&qm->xqc);
410 	hisi_qm_free_xqc(&qm->cfg_xqc);
411 }
412 
413 static enum hisi_drv_status hisi_qm_alloc_xqc(struct qm_xqc *xqc,
414 					      uint32_t qp_num)
415 {
416 	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;
417 	size_t sqc_size = 0;
418 	size_t cqc_size = 0;
419 
420 	sqc_size = sizeof(struct qm_sqc) * qp_num;
421 	cqc_size = sizeof(struct qm_cqc) * qp_num;
422 
423 	xqc->sqc = memalign(HISI_QM_ALIGN32, sqc_size);
424 	if (!xqc->sqc) {
425 		EMSG("Fail to malloc sqc");
426 		return HISI_QM_DRVCRYPT_ENOMEM;
427 	}
428 	memset(xqc->sqc, 0, sqc_size);
429 	xqc->sqc_dma = virt_to_phys(xqc->sqc);
430 
431 	xqc->cqc = memalign(HISI_QM_ALIGN32, cqc_size);
432 	if (!xqc->cqc) {
433 		EMSG("Fail to malloc cqc");
434 		ret = HISI_QM_DRVCRYPT_ENOMEM;
435 		goto free_sqc;
436 	}
437 	memset(xqc->cqc, 0, cqc_size);
438 	xqc->cqc_dma = virt_to_phys(xqc->cqc);
439 
440 	return HISI_QM_DRVCRYPT_NO_ERR;
441 
442 	free(xqc->cqc);
443 free_sqc:
444 	free(xqc->sqc);
445 	return ret;
446 }
447 
/*
 * Allocate all QM memory: the main and config XQC tables, the QP array
 * and every QP's rings.  On any failure, everything allocated so far is
 * unwound in reverse order before returning the error.
 */
static enum hisi_drv_status qm_alloc(struct hisi_qm *qm)
{
	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;
	int32_t j;
	uint32_t i;

	ret = hisi_qm_alloc_xqc(&qm->xqc, qm->qp_num);
	if (ret)
		return ret;

	/* Single-entry table reused for SQC/CQC mailbox configuration */
	ret = hisi_qm_alloc_xqc(&qm->cfg_xqc, 1);
	if (ret)
		goto free_xqc;

	qm->qp_array = calloc(qm->qp_num, sizeof(struct hisi_qp));
	if (!qm->qp_array) {
		EMSG("Fail to malloc qp_array");
		ret = HISI_QM_DRVCRYPT_ENOMEM;
		goto free_cfg_xqc;
	}

	for (i = 0; i < qm->qp_num; i++) {
		ret = qp_alloc(qm, i);
		if (ret)
			goto free_qp_mem;
	}

	return HISI_QM_DRVCRYPT_NO_ERR;

free_qp_mem:
	/* Signed j so the loop terminates correctly when i == 0 */
	for (j = (int)i - 1; j >= 0; j--)
		qp_free(qm, j);
	free(qm->qp_array);
free_cfg_xqc:
	hisi_qm_free_xqc(&qm->cfg_xqc);
free_xqc:
	hisi_qm_free_xqc(&qm->xqc);
	return ret;
}
487 
/*
 * Initialize the QM software state: discover the queue range (VF only,
 * by reading the PF-programmed VFT), validate parameters, allocate all
 * queue memory and set up the locks.
 */
enum hisi_drv_status hisi_qm_init(struct hisi_qm *qm)
{
	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;

	if (qm->fun_type == HISI_QM_HW_VF) {
		ret = qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
		if (ret) {
			EMSG("Fail to get function vft config");
			return ret;
		}
	}

	/* For a PF, qp_num/sqe_size must have been filled in by the caller */
	if (!qm->qp_num || !qm->sqe_size) {
		EMSG("Invalid QM parameters");
		return HISI_QM_DRVCRYPT_EINVAL;
	}

	ret = qm_alloc(qm);
	if (ret)
		return ret;

	qm->qp_in_used = 0;
	qm->qp_idx = 0;
	mutex_init(&qm->qp_lock);
	mutex_init(&qm->mailbox_lock);

	return HISI_QM_DRVCRYPT_NO_ERR;
}
516 
/*
 * Force the device to write its internal SQC/CQC caches back to memory.
 * Must complete before the backing memory is freed; panics on timeout
 * because continuing would let the device scribble on freed memory.
 */
static void qm_cache_writeback(struct hisi_qm *qm)
{
	uint32_t val = 0;

	io_write32(qm->io_base + QM_CACHE_WB_START, QM_FVT_CFG_RDY_BIT);

	if (IO_READ32_POLL_TIMEOUT(qm->io_base + QM_CACHE_WB_DONE, val,
				   val & QM_FVT_CFG_RDY_BIT, POLL_PERIOD,
				   POLL_TIMEOUT))
		panic("QM writeback sqc cache fail");
}
528 
/*
 * Tear down the QM.  The cache writeback must happen before qm_free()
 * so the device no longer references the rings being released.
 */
void hisi_qm_uninit(struct hisi_qm *qm)
{
	qm_cache_writeback(qm);
	qm_free(qm);
	mutex_destroy(&qm->qp_lock);
	mutex_destroy(&qm->mailbox_lock);
}
536 
537 static enum hisi_drv_status qm_hw_mem_reset(struct hisi_qm *qm)
538 {
539 	uint32_t val = 0;
540 
541 	io_write32(qm->io_base + QM_MEM_START_INIT, QM_FVT_CFG_RDY_BIT);
542 
543 	if (IO_READ32_POLL_TIMEOUT(qm->io_base + QM_MEM_INIT_DONE, val,
544 				   val & QM_FVT_CFG_RDY_BIT, POLL_PERIOD,
545 				   POLL_TIMEOUT))
546 		return HISI_QM_DRVCRYPT_EBUSY;
547 
548 	return HISI_QM_DRVCRYPT_NO_ERR;
549 }
550 
551 static enum hisi_drv_status qm_func_vft_cfg(struct hisi_qm *qm)
552 {
553 	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;
554 	uint32_t q_base = qm->qp_num;
555 	uint32_t act_q_num = 0;
556 	unsigned int i = 0;
557 	unsigned int j = 0;
558 
559 	if (!qm->vfs_num)
560 		return HISI_QM_DRVCRYPT_NO_ERR;
561 
562 	if (qm->vfs_num > HISI_QM_MAX_VFS_NUM) {
563 		EMSG("Invalid QM vfs_num");
564 		return HISI_QM_DRVCRYPT_EINVAL;
565 	}
566 
567 	for (i = 1; i <= qm->vfs_num; i++) {
568 		act_q_num = HISI_QM_VF_Q_NUM;
569 		ret = qm_set_xqc_vft(qm, i, q_base, act_q_num);
570 		if (ret) {
571 			for (j = 1; j < i; j++)
572 				(void)qm_set_xqc_vft(qm, j, 0, 0);
573 			return ret;
574 		}
575 		q_base += act_q_num;
576 	}
577 
578 	return HISI_QM_DRVCRYPT_NO_ERR;
579 }
580 
/*
 * Bring the QM to an operational state: (PF only) reset internal memory
 * and program the PF/VF virtual function tables, then point the device
 * at the SQC/CQC base tables and mask the AEQ/EQ interrupts (secure
 * world polls instead of taking MSIs).
 */
enum hisi_drv_status hisi_qm_start(struct hisi_qm *qm)
{
	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;

	if (qm->fun_type == HISI_QM_HW_PF) {
		ret = qm_hw_mem_reset(qm);
		if (ret) {
			EMSG("Fail to reset QM hardware mem");
			return ret;
		}

		/* Function 0 is the PF itself */
		ret = qm_set_xqc_vft(qm, 0, qm->qp_base, qm->qp_num);
		if (ret) {
			EMSG("Fail to set PF xqc_vft");
			return ret;
		}

		ret = qm_func_vft_cfg(qm);
		if (ret) {
			EMSG("Fail to set VF xqc_vft");
			return ret;
		}
	}

	ret = hisi_qm_mb_write(qm, QM_MB_CMD_SQC_BT, qm->xqc.sqc_dma, 0);
	if (ret) {
		EMSG("Fail to set sqc_bt");
		return ret;
	}

	ret = hisi_qm_mb_write(qm, QM_MB_CMD_CQC_BT, qm->xqc.cqc_dma, 0);
	if (ret) {
		EMSG("Fail to set cqc_bt");
		return ret;
	}

	/* Security mode does not support msi */
	io_write32(qm->io_base + QM_VF_AEQ_INT_MASK, QM_VF_AEQ_INT_MASK_EN);
	io_write32(qm->io_base + QM_VF_EQ_INT_MASK, QM_VF_EQ_INT_MASK_EN);

	return HISI_QM_DRVCRYPT_NO_ERR;
}
623 
624 static void qm_disable_clock_gate(struct hisi_qm *qm)
625 
626 {
627 	if (qm->version == HISI_QM_HW_V2)
628 		return;
629 
630 	io_setbits32(qm->io_base + QM_PM_CTRL0, QM_IDLE_DISABLE);
631 }
632 
/*
 * One-time device-level configuration, PF only (VFs inherit the PF's
 * settings): AXI user domain, cache behavior, RAS masking and doorbell
 * timeout.
 */
void hisi_qm_dev_init(struct hisi_qm *qm)
{
	if (qm->fun_type == HISI_QM_HW_VF)
		return;

	qm_disable_clock_gate(qm);

	/* QM user domain */
	io_write32(qm->io_base + QM_ARUSER_M_CFG_1, QM_AXUSER_CFG);
	io_write32(qm->io_base + QM_ARUSER_M_CFG_ENABLE, AXUSER_M_CFG_ENABLE);
	io_write32(qm->io_base + QM_AWUSER_M_CFG_1, QM_AXUSER_CFG);
	io_write32(qm->io_base + QM_AWUSER_M_CFG_ENABLE, AXUSER_M_CFG_ENABLE);
	/* QM cache */
	io_write32(qm->io_base + QM_AXI_M_CFG, AXI_M_CFG);

	if (qm->version == HISI_QM_HW_V2) {
		/* Disable FLR triggered by BME(bus master enable) */
		io_write32(qm->io_base + QM_PEH_AXUSER_CFG, PEH_AXUSER_CFG);
		/* Set sec sqc and cqc cache wb threshold 4 */
		io_write32(qm->io_base + QM_CACHE_CTL, QM_CACHE_CFG);
	}
	/* Disable QM ras */
	io_write32(qm->io_base + HISI_QM_ABNML_INT_MASK,
		   HISI_QM_ABNML_INT_MASK_CFG);
	/* Set doorbell timeout to QM_DB_TIMEOUT_SET ns */
	io_write32(qm->io_base + QM_DB_TIMEOUT_CFG, QM_DB_TIMEOUT_SET);
}
660 
/*
 * Build the SQ context for @qp in the shared cfg_xqc staging buffer and
 * push it to the device via the mailbox.  The mailbox lock is taken
 * before touching the staging buffer because it is shared by all queues.
 */
static enum hisi_drv_status qm_sqc_cfg(struct hisi_qp *qp)
{
	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;
	struct hisi_qm *qm = qp->qm;
	struct qm_sqc *sqc = qm->cfg_xqc.sqc;
	struct qm_mailbox mb = { };

	mutex_lock(&qm->mailbox_lock);
	memset(sqc, 0, sizeof(struct qm_sqc));
	reg_pair_from_64(qp->sqe_dma, &sqc->base_h, &sqc->base_l);
	/* Queue depth (as depth - 1) and log2 of the SQE size */
	sqc->dw3 = (HISI_QM_Q_DEPTH - 1) |
		    SHIFT_U32(qm->sqe_log2_size, QM_SQ_SQE_SIZE_SHIFT);
	sqc->rand_data = QM_DB_RAND_DATA;
	/* SQ and CQ of a pair share the same index */
	sqc->cq_num = qp->qp_id;
	sqc->w13 = BIT32(QM_SQ_ORDER_SHIFT) |
		   SHIFT_U32(qp->sq_type, QM_SQ_TYPE_SHIFT);

	qm_mb_init(&mb, QM_MB_CMD_SQC, qm->cfg_xqc.sqc_dma, qp->qp_id,
		   QM_MB_OP_WR);
	ret = qm_mb_nolock(qm, &mb);
	mutex_unlock(&qm->mailbox_lock);

	return ret;
}
685 
/*
 * Build the CQ context for @qp in the shared cfg_xqc staging buffer and
 * push it to the device via the mailbox (same locking rationale as
 * qm_sqc_cfg()).
 */
static enum hisi_drv_status qm_cqc_cfg(struct hisi_qp *qp)
{
	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;
	struct hisi_qm *qm = qp->qm;
	struct qm_cqc *cqc = qm->cfg_xqc.cqc;
	struct qm_mailbox mb = { };

	mutex_lock(&qm->mailbox_lock);
	memset(cqc, 0, sizeof(struct qm_cqc));
	reg_pair_from_64(qp->cqe_dma, &cqc->base_h, &cqc->base_l);
	cqc->dw3 = (HISI_QM_Q_DEPTH - 1) |
		    SHIFT_U32(QM_CQE_SIZE, QM_CQ_CQE_SIZE_SHIFT);
	cqc->rand_data = QM_DB_RAND_DATA;
	/* Initial phase value matches qp->cqc_phase = true at create time */
	cqc->dw6 = PHASE_DEFAULT_VAL;

	qm_mb_init(&mb, QM_MB_CMD_CQC, qm->cfg_xqc.cqc_dma, qp->qp_id,
		   QM_MB_OP_WR);
	ret = qm_mb_nolock(qm, &mb);
	mutex_unlock(&qm->mailbox_lock);

	return ret;
}
708 
709 struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, uint8_t sq_type)
710 {
711 	struct hisi_qp *qp = NULL;
712 	int cur_idx = 0;
713 	uint32_t i = 0;
714 
715 	mutex_lock(&qm->qp_lock);
716 	if (qm->qp_in_used == qm->qp_num) {
717 		EMSG("All %"PRIu32" queues of QM are busy", qm->qp_num);
718 		goto err_proc;
719 	}
720 
721 	for (i = 0; i < qm->qp_num; i++) {
722 		cur_idx = (qm->qp_idx + i) % qm->qp_num;
723 		if (!qm->qp_array[cur_idx].used) {
724 			qm->qp_array[cur_idx].used = true;
725 			qm->qp_idx = cur_idx + 1;
726 			break;
727 		}
728 	}
729 
730 	qp = qm->qp_array + cur_idx;
731 	memset(qp->cqe, 0, sizeof(struct qm_cqe) * HISI_QM_Q_DEPTH);
732 	qp->sq_type = sq_type;
733 	qp->sq_tail = 0;
734 	qp->cq_head = 0;
735 	qp->cqc_phase = true;
736 
737 	if (qm_sqc_cfg(qp)) {
738 		EMSG("Fail to set qp[%"PRIu32"] sqc", qp->qp_id);
739 		goto err_proc;
740 	}
741 
742 	if (qm_cqc_cfg(qp)) {
743 		EMSG("Fail to set qp[%"PRIu32"] cqc", qp->qp_id);
744 		goto err_proc;
745 	}
746 
747 	qm->qp_in_used++;
748 	mutex_unlock(&qm->qp_lock);
749 	return qp;
750 
751 err_proc:
752 	qp->sq_type = 0;
753 	qp->cqc_phase = false;
754 	mutex_unlock(&qm->qp_lock);
755 	return NULL;
756 }
757 
758 void hisi_qm_release_qp(struct hisi_qp *qp)
759 {
760 	struct hisi_qm *qm = NULL;
761 
762 	if (!qp) {
763 		EMSG("QP is NULL");
764 		return;
765 	}
766 
767 	qm = qp->qm;
768 	mutex_lock(&qm->qp_lock);
769 	qm->qp_in_used--;
770 	qp->used = false;
771 	mutex_unlock(&qm->qp_lock);
772 }
773 
774 static void qm_sq_tail_update(struct hisi_qp *qp)
775 {
776 	if (qp->sq_tail == HISI_QM_Q_DEPTH - 1)
777 		qp->sq_tail = 0;
778 	else
779 		qp->sq_tail++;
780 }
781 
/*
 * One task thread will just bind to one hardware queue, and
 * hardware does not support msi. So we have no lock here.
 */
enum hisi_drv_status hisi_qp_send(struct hisi_qp *qp, void *msg)
{
	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;
	struct hisi_qm *qm = NULL;
	void *sqe = NULL;

	if (!qp) {
		EMSG("QP is NULL");
		return HISI_QM_DRVCRYPT_EINVAL;
	}

	qm = qp->qm;
	ret = qm->dev_status_check(qm);
	if (ret)
		return ret;

	/* Fill the SQE at the current tail of the ring */
	sqe = (void *)((vaddr_t)qp->sqe + qm->sqe_size * qp->sq_tail);
	memset(sqe, 0, qm->sqe_size);

	ret = qp->fill_sqe(sqe, msg);
	if (ret) {
		EMSG("Fail to fill sqe");
		return ret;
	}

	qm_sq_tail_update(qp);

	/* Make the SQE visible to the device before ringing the doorbell */
	dsb();
	qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_SQ, qp->sq_tail, 0);

	return HISI_QM_DRVCRYPT_NO_ERR;
}
818 
819 static void qm_cq_head_update(struct hisi_qp *qp)
820 {
821 	if (qp->cq_head == HISI_QM_Q_DEPTH - 1) {
822 		qp->cqc_phase = !qp->cqc_phase;
823 		qp->cq_head = 0;
824 	} else {
825 		qp->cq_head++;
826 	}
827 }
828 
/*
 * Poll the completion queue once.  Returns RECV_DONE when a completion
 * was consumed and parsed, NO_ERR when nothing is pending yet, or an
 * error from the device check / SQE parser.
 */
static enum hisi_drv_status hisi_qp_recv(struct hisi_qp *qp, void *msg)
{
	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;
	struct hisi_qm *qm = qp->qm;
	struct qm_cqe *cqe = NULL;
	void *sqe = NULL;

	ret = qm->dev_status_check(qm);
	if (ret)
		return ret;

	cqe = qp->cqe + qp->cq_head;
	/* A matching phase bit means the device has published this CQE */
	if (QM_CQE_PHASE(cqe) == qp->cqc_phase) {
		/* Order the phase check before reading the SQE contents */
		dsb_osh();
		sqe = (void *)((vaddr_t)qp->sqe + qm->sqe_size * cqe->sq_head);
		ret = qp->parse_sqe(sqe, msg);
		/* Ack the CQE to the device even if parsing failed */
		qm_cq_head_update(qp);
		qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, qp->cq_head, 0);
		if (ret) {
			EMSG("Fail to parse sqe");
			return ret;
		}
	} else {
		return HISI_QM_DRVCRYPT_NO_ERR;
	}

	return HISI_QM_DRVCRYPT_RECV_DONE;
}
857 
858 static void qm_dfx_dump(struct hisi_qm *qm)
859 {
860 	const struct qm_dfx_registers *regs = qm_dfx_regs;
861 	__maybe_unused uint32_t val = 0;
862 
863 	if (qm->fun_type == HISI_QM_HW_VF)
864 		return;
865 
866 	while (regs->reg_name) {
867 		val = io_read32(qm->io_base + regs->reg_offset);
868 		EMSG("%s= 0x%" PRIx32, regs->reg_name, val);
869 		regs++;
870 	}
871 }
872 
873 enum hisi_drv_status hisi_qp_recv_sync(struct hisi_qp *qp, void *msg)
874 {
875 	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;
876 	uint32_t timeout = 0;
877 
878 	if (!qp) {
879 		EMSG("QP is NULL");
880 		return HISI_QM_DRVCRYPT_EINVAL;
881 	}
882 
883 	timeout = timeout_init_us(QM_SINGLE_WAIT_TIME *
884 				  HISI_QM_RECV_SYNC_TIMEOUT);
885 	while (!timeout_elapsed(timeout)) {
886 		ret = hisi_qp_recv(qp, msg);
887 		if (ret) {
888 			if (ret != HISI_QM_DRVCRYPT_RECV_DONE) {
889 				EMSG("QM recv task error");
890 				qm_dfx_dump(qp->qm);
891 				return ret;
892 			} else {
893 				return HISI_QM_DRVCRYPT_NO_ERR;
894 			}
895 		}
896 	}
897 
898 	EMSG("QM recv task timeout");
899 	qm_dfx_dump(qp->qm);
900 	return HISI_QM_DRVCRYPT_ETMOUT;
901 }
902