xref: /optee_os/core/drivers/crypto/hisilicon/hisi_qm.c (revision 5d5d7d0b1c038a6836be9f0b38585f5aa6a4dd01)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright 2022-2023 HiSilicon Limited.
4  * Kunpeng hardware accelerator queue management module.
5  */
6 #include "hisi_qm.h"
7 
8 #define QM_FVT_CFG_RDY_BIT	0x1
9 /* Doorbell */
10 #define QM_DOORBELL_SQ_CQ_BASE	0x1000
11 #define QM_DB_CMD_SHIFT		12
12 #define QM_DB_RAND_DATA_SHIFT	16
13 #define QM_DB_INDEX_SHIFT	32
14 #define QM_DB_PRIORITY_SHIFT	48
15 #define QM_DB_RAND_DATA		0x5a
16 #define QM_DOORBELL_CMD_SQ	0
17 #define QM_DOORBELL_CMD_CQ	1
18 /* Mailbox */
19 #define QM_MAILBOX_BASE		0x300
20 #define QM_MAILBOX_DATA_ADDR_L	0x304
21 #define QM_MAILBOX_DATA_ADDR_H	0x308
22 #define QM_MB_BUSY_SHIFT	13
23 #define QM_MB_BUSY_BIT		BIT32(QM_MB_BUSY_SHIFT)
24 #define QM_MB_OP_SHIFT		14
25 #define QM_MB_OP_WR		0
26 #define QM_MB_OP_RD		1
27 #define QM_MB_STATUS_MASK	GENMASK_32(12, 9)
28 #define QM_MB_WAIT_READY_CNT	10
29 #define QM_MB_WAIT_MAX_CNT	21000
30 #define QM_MB_WAIT_PERIOD	200
31 /* XQC_VFT */
32 #define QM_VFT_CFG_OP_ENABLE	0x100054
33 #define QM_VFT_CFG_OP_WR	0x100058
34 #define QM_VFT_CFG_TYPE		0x10005c
35 #define QM_VFT_CFG_ADDRESS	0x100060
36 #define QM_VFT_CFG_DATA_L	0x100064
37 #define QM_VFT_CFG_DATA_H	0x100068
38 #define QM_VFT_CFG_RDY		0x10006c
39 #define QM_SQC_VFT		0
40 #define QM_CQC_VFT		1
41 #define QM_SQC_VFT_START_SQN_SHIFT 28
42 #define QM_SQC_VFT_VALID	BIT64(44)
43 #define QM_SQC_VFT_SQ_NUM_SHIFT 45
44 #define QM_CQC_VFT_VALID	BIT(28)
45 #define QM_VFT_WRITE		0
46 #define QM_VFT_READ		1
47 #define QM_SQC_VFT_BASE_MASK	0x3ff
48 #define QM_SQC_VFT_NUM_MASK	0x3ff
49 /* QM INIT */
50 #define QM_MEM_START_INIT	0x100040
51 #define QM_MEM_INIT_DONE	0x100044
52 #define QM_VF_AEQ_INT_MASK	0x4
53 #define QM_VF_AEQ_INT_MASK_EN	0x1
54 #define QM_VF_EQ_INT_MASK	0xc
55 #define QM_VF_EQ_INT_MASK_EN	0x1
56 #define QM_ARUSER_M_CFG_1	0x100088
57 #define QM_ARUSER_M_CFG_ENABLE	0x100090
58 #define QM_AWUSER_M_CFG_1	0x100098
59 #define QM_AWUSER_M_CFG_ENABLE	0x1000a0
60 #define QM_AXUSER_CFG		0x40001070
61 #define AXUSER_M_CFG_ENABLE	0x7ffffc
62 #define QM_AXI_M_CFG		0x1000ac
63 #define AXI_M_CFG		0xffff
64 #define QM_PEH_AXUSER_CFG	0x1000cc
65 #define PEH_AXUSER_CFG		0x400801
66 #define QM_CACHE_CTL		0x100050
67 #define QM_CACHE_CFG		0x4893
68 #define QM_CACHE_WB_START	0x204
69 #define QM_CACHE_WB_DONE	0x208
70 #define QM_PM_CTRL0		0x100148
71 #define QM_IDLE_DISABLE		BIT(9)
72 #define QM_DB_TIMEOUT_CFG	0x100074
73 #define QM_DB_TIMEOUT_SET	0x1fffff
74 /* XQC shift */
75 #define QM_SQ_SQE_SIZE_SHIFT	12
76 #define QM_SQ_ORDER_SHIFT	4
77 #define QM_SQ_TYPE_SHIFT	8
78 #define QM_CQE_SIZE		4
79 #define QM_CQ_CQE_SIZE_SHIFT	12
80 /* CQE */
81 #define QM_CQE_PHASE(cqe) (((cqe)->w7) & QM_FVT_CFG_RDY_BIT)
82 
/* Mailbox commands supported by all QM hardware versions. */
enum qm_mailbox_common_cmd {
	QM_MB_CMD_SQC = 0x0,	/* configure one SQ context */
	QM_MB_CMD_CQC,		/* configure one CQ context */
	QM_MB_CMD_EQC,		/* event queue context (unused here) */
	QM_MB_CMD_AEQC,		/* abnormal event queue context (unused here) */
	QM_MB_CMD_SQC_BT,	/* set SQC base table address */
	QM_MB_CMD_CQC_BT,	/* set CQC base table address */
	QM_MB_CMD_SQC_VFT,	/* read back the SQC virtual function table */
};
92 
/*
 * Additional mailbox commands introduced with QM hardware V3.
 * None of these are referenced in this file yet.
 * NOTE(review): QM_MB_CM_CLOSE_QM looks like a typo for QM_MB_CMD_CLOSE_QM —
 * confirm against the hardware documentation before renaming.
 */
enum qm_mailbox_cmd_v3 {
	QM_MB_CM_CLOSE_QM = 0x7,
	QM_MB_CMD_CLOSE_QP,
	QM_MB_CMD_FLUSH_QM,
	QM_MB_CMD_FLUSH_QP,
	QM_MB_CMD_SRC = 0xc,
	QM_MB_CMD_DST,
	QM_MB_CMD_STOP_QM,
};
102 
/*
 * In-memory image of the 128-bit QM mailbox register block.
 * The union allows access either field by field or as two 64-bit
 * halves for the paired 64-bit MMIO accesses done by qm_mb_write()
 * and qm_mb_read().
 */
struct qm_mailbox {
	union {
		struct {
			uint16_t w0;	 /* cmd, op, busy and status bits */
			uint16_t queue;	 /* queue number the cmd applies to */
			uint32_t base_l; /* low 32 bits of the DMA address */
			uint32_t base_h; /* high 32 bits of the DMA address */
			uint32_t token;
		};
		uint64_t x[2];
	};
};
115 
/* Name/offset pair describing one QM debug (DFX) counter register. */
struct qm_dfx_registers {
	const char *reg_name;
	uint32_t reg_offset;
};

/*
 * DFX counters dumped by qm_dfx_dump() on errors.
 * The empty entry (NULL reg_name) terminates the list.
 */
static const struct qm_dfx_registers qm_dfx_regs[] = {
	{ .reg_name = "QM_ECC_1BIT_CNT           ", .reg_offset = 0x104000 },
	{ .reg_name = "QM_ECC_MBIT_CNT           ", .reg_offset = 0x104008 },
	{ .reg_name = "QM_DFX_MB_CNT             ", .reg_offset = 0x104018 },
	{ .reg_name = "QM_DFX_DB_CNT             ", .reg_offset = 0x104028 },
	{ .reg_name = "QM_DFX_SQE_CNT            ", .reg_offset = 0x104038 },
	{ .reg_name = "QM_DFX_CQE_CNT            ", .reg_offset = 0x104048 },
	{ .reg_name = "QM_DFX_SEND_SQE_TO_ACC_CNT", .reg_offset = 0x104050 },
	{ .reg_name = "QM_DFX_WB_SQE_FROM_ACC_CNT", .reg_offset = 0x104058 },
	{ .reg_name = "QM_DFX_ACC_FINISH_CNT     ", .reg_offset = 0x104060 },
	{ .reg_name = "QM_DFX_CQE_ERR_CNT        ", .reg_offset = 0x1040b4 },
	{ }
};
134 
135 void hisi_qm_get_version(struct hisi_qm *qm)
136 {
137 	qm->version = io_read32(qm->io_base + HISI_QM_REVISON_ID_BASE) &
138 		      HISI_QM_REVISON_ID_MASK;
139 }
140 
141 static void qm_db(struct hisi_qm *qm, uint16_t qn, uint8_t cmd, uint16_t index,
142 		  uint8_t priority)
143 {
144 	uint64_t doorbell = 0;
145 
146 	doorbell = qn | SHIFT_U64(cmd, QM_DB_CMD_SHIFT) |
147 		   SHIFT_U64(QM_DB_RAND_DATA, QM_DB_RAND_DATA_SHIFT) |
148 		   SHIFT_U64(index, QM_DB_INDEX_SHIFT) |
149 		   SHIFT_U64(priority, QM_DB_PRIORITY_SHIFT);
150 
151 	io_write64(qm->io_base + QM_DOORBELL_SQ_CQ_BASE, doorbell);
152 }
153 
/*
 * Push a mailbox request to hardware. The whole 128-bit mailbox is
 * written with one paired 64-bit store, then a barrier makes the
 * write visible before any subsequent polling.
 */
static void qm_mb_write(struct hisi_qm *qm, struct qm_mailbox *mb)
{
	vaddr_t dst = qm->io_base + QM_MAILBOX_BASE;

	write_64bit_pair(dst, mb->x[1], mb->x[0]);
	dsb_osh();
}
161 
/* Read back the whole 128-bit mailbox with one paired 64-bit load. */
static void qm_mb_read(struct hisi_qm *qm, struct qm_mailbox *mb)
{
	vaddr_t mb_base = qm->io_base + QM_MAILBOX_BASE;

	read_64bit_pair(mb_base, mb->x + 1, mb->x);
	dsb_osh();
}
169 
170 static enum hisi_drv_status qm_wait_mb_ready(struct hisi_qm *qm)
171 {
172 	struct qm_mailbox mb = { };
173 	uint32_t timeout = 0;
174 
175 	timeout = timeout_init_us(QM_MB_WAIT_PERIOD * QM_MB_WAIT_READY_CNT);
176 	while (!timeout_elapsed(timeout)) {
177 		/* 128 bits should be read from hardware at one time*/
178 		qm_mb_read(qm, &mb);
179 		if (!(mb.w0 & QM_MB_BUSY_BIT))
180 			return HISI_QM_DRVCRYPT_NO_ERR;
181 	}
182 
183 	EMSG("QM mailbox is busy to start!");
184 
185 	return HISI_QM_DRVCRYPT_EBUSY;
186 }
187 
188 static enum hisi_drv_status qm_wait_mb_finish(struct hisi_qm *qm,
189 					      struct qm_mailbox *mb)
190 {
191 	uint32_t timeout = 0;
192 
193 	timeout = timeout_init_us(QM_MB_WAIT_PERIOD * QM_MB_WAIT_MAX_CNT);
194 	while (!timeout_elapsed(timeout)) {
195 		qm_mb_read(qm, mb);
196 		if (!(mb->w0 & QM_MB_BUSY_BIT)) {
197 			if (mb->w0 & QM_MB_STATUS_MASK) {
198 				EMSG("QM mailbox operation failed!");
199 				return HISI_QM_DRVCRYPT_EIO;
200 			} else {
201 				return HISI_QM_DRVCRYPT_NO_ERR;
202 			}
203 		}
204 	}
205 
206 	return HISI_QM_DRVCRYPT_ETMOUT;
207 }
208 
209 static void qm_mb_init(struct qm_mailbox *mb, uint8_t cmd, uint64_t base,
210 		       uint16_t qnum, uint8_t op)
211 {
212 	mb->w0 = cmd | SHIFT_U32(op, QM_MB_OP_SHIFT) |  QM_MB_BUSY_BIT;
213 	mb->queue = qnum;
214 	reg_pair_from_64(base, &mb->base_h, &mb->base_l);
215 	mb->token = 0;
216 }
217 
/*
 * Send one mailbox request and wait for its completion.
 * Caller must hold qm->mailbox_lock.
 */
static enum hisi_drv_status qm_mb_nolock(struct hisi_qm *qm,
					 struct qm_mailbox *mb)
{
	if (qm_wait_mb_ready(qm))
		return HISI_QM_DRVCRYPT_EBUSY;

	qm_mb_write(qm, mb);

	return qm_wait_mb_finish(qm, mb);
}
228 
/*
 * Issue a mailbox write operation: program @dma_addr for command @cmd
 * on queue @qnum, serialized by the mailbox lock.
 */
static enum hisi_drv_status hisi_qm_mb_write(struct hisi_qm *qm, uint8_t cmd,
					     uintptr_t dma_addr, uint16_t qnum)
{
	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;
	struct qm_mailbox mb = { };

	qm_mb_init(&mb, cmd, dma_addr, qnum, QM_MB_OP_WR);
	mutex_lock(&qm->mailbox_lock);
	ret = qm_mb_nolock(qm, &mb);
	mutex_unlock(&qm->mailbox_lock);

	return ret;
}
242 
243 static enum hisi_drv_status hisi_qm_mb_read(struct hisi_qm *qm, uint64_t *base,
244 					    uint8_t cmd, uint16_t qnum)
245 {
246 	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;
247 	struct qm_mailbox mb = { };
248 
249 	qm_mb_init(&mb, cmd, 0, qnum, QM_MB_OP_RD);
250 	mutex_lock(&qm->mailbox_lock);
251 	ret = qm_mb_nolock(qm, &mb);
252 	mutex_unlock(&qm->mailbox_lock);
253 	if (ret)
254 		return ret;
255 
256 	reg_pair_from_64(*base, &mb.base_h, &mb.base_l);
257 
258 	return HISI_QM_DRVCRYPT_NO_ERR;
259 }
260 
261 static void qm_cfg_vft_data(struct hisi_qm *qm, uint8_t vft_type,
262 			    uint32_t base, uint32_t number)
263 {
264 	uint32_t data_h = 0;
265 	uint32_t data_l = 0;
266 	uint64_t data = 0;
267 
268 	switch (vft_type) {
269 	case QM_SQC_VFT:
270 		data = SHIFT_U64(base, QM_SQC_VFT_START_SQN_SHIFT) |
271 			QM_SQC_VFT_VALID |
272 			SHIFT_U64((number - 1), QM_SQC_VFT_SQ_NUM_SHIFT);
273 		break;
274 	case QM_CQC_VFT:
275 		data = QM_CQC_VFT_VALID;
276 		break;
277 	default:
278 		panic("Invalid vft type");
279 	}
280 
281 	reg_pair_from_64(data, &data_h, &data_l);
282 	io_write32(qm->io_base + QM_VFT_CFG_DATA_L, data_l);
283 	io_write32(qm->io_base + QM_VFT_CFG_DATA_H, data_h);
284 }
285 
/*
 * Write one virtual function table entry for @function.
 * Sequence: wait until the VFT engine is idle, program operation/type/
 * address/data registers, clear the ready flag, kick the operation and
 * wait for the ready flag to come back. The register write order
 * matters; do not reorder.
 */
static enum hisi_drv_status qm_set_vft_common(struct hisi_qm *qm,
					      uint8_t vft_type,
					      uint32_t function,
					      uint32_t base,
					      uint32_t num)
{
	uint32_t val = 0;

	/* Wait for any previous VFT operation to finish */
	if (IO_READ32_POLL_TIMEOUT(qm->io_base + QM_VFT_CFG_RDY, val,
				   val & QM_FVT_CFG_RDY_BIT, POLL_PERIOD,
				   POLL_TIMEOUT)) {
		EMSG("QM VFT is not ready");
		return HISI_QM_DRVCRYPT_EBUSY;
	}

	io_write32(qm->io_base + QM_VFT_CFG_OP_WR, QM_VFT_WRITE);
	io_write32(qm->io_base + QM_VFT_CFG_TYPE, vft_type);
	io_write32(qm->io_base + QM_VFT_CFG_ADDRESS, function);
	qm_cfg_vft_data(qm, vft_type, base, num);
	/* Clear ready, then trigger the programmed operation */
	io_write32(qm->io_base + QM_VFT_CFG_RDY, 0x0);
	io_write32(qm->io_base + QM_VFT_CFG_OP_ENABLE, QM_FVT_CFG_RDY_BIT);

	/* Wait for this operation to complete */
	if (IO_READ32_POLL_TIMEOUT(qm->io_base + QM_VFT_CFG_RDY, val,
				   val & QM_FVT_CFG_RDY_BIT, POLL_PERIOD,
				   POLL_TIMEOUT)) {
		EMSG("QM VFT is not ready");
		return HISI_QM_DRVCRYPT_EBUSY;
	}

	return HISI_QM_DRVCRYPT_NO_ERR;
}
317 
318 static enum hisi_drv_status qm_set_xqc_vft(struct hisi_qm *qm,
319 					   uint32_t function,
320 					   uint32_t base, uint32_t num)
321 {
322 	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;
323 	int i = 0;
324 
325 	for (i = QM_SQC_VFT; i <= QM_CQC_VFT; i++) {
326 		ret = qm_set_vft_common(qm, i, function, base, num);
327 		if (ret) {
328 			EMSG("QM set type %d fail", i);
329 			return ret;
330 		}
331 	}
332 
333 	return HISI_QM_DRVCRYPT_NO_ERR;
334 }
335 
/*
 * Query this function's SQC VFT entry via mailbox and decode the
 * queue base number and queue count assigned to it.
 */
static enum hisi_drv_status qm_get_vft(struct hisi_qm *qm, uint32_t *base,
				       uint32_t *num)
{
	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;
	uint64_t sqc_vft = 0;

	ret = hisi_qm_mb_read(qm, &sqc_vft, QM_MB_CMD_SQC_VFT, 0);
	if (ret)
		return ret;

	*base = (sqc_vft >> QM_SQC_VFT_START_SQN_SHIFT) & QM_SQC_VFT_BASE_MASK;
	/* Hardware stores the queue count minus one */
	*num = ((sqc_vft >> QM_SQC_VFT_SQ_NUM_SHIFT) & QM_SQC_VFT_NUM_MASK) + 1;

	return HISI_QM_DRVCRYPT_NO_ERR;
}
351 
/* Free the SQE and CQE rings of queue pair @id. */
static void qp_free(struct hisi_qm *qm, uint32_t id)
{
	struct hisi_qp *qp = &qm->qp_array[id];

	free(qp->sqe);
	free(qp->cqe);
}
359 
360 static enum hisi_drv_status qp_alloc(struct hisi_qm *qm, uint32_t id)
361 {
362 	size_t sq_size = qm->sqe_size * HISI_QM_Q_DEPTH;
363 	size_t cq_size = sizeof(struct qm_cqe) * HISI_QM_Q_DEPTH;
364 	struct hisi_qp *qp = &qm->qp_array[id];
365 	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;
366 
367 	qp->sqe = memalign(HISI_QM_ALIGN128, sq_size);
368 	if (!qp->sqe) {
369 		EMSG("Fail to malloc sq[%"PRIu32"]", id);
370 		return HISI_QM_DRVCRYPT_ENOMEM;
371 	}
372 	qp->sqe_dma = virt_to_phys(qp->sqe);
373 	qp->cqe = memalign(HISI_QM_ALIGN32, cq_size);
374 	if (!qp->cqe) {
375 		EMSG("Fail to malloc cq[%"PRIu32"]", id);
376 		ret = HISI_QM_DRVCRYPT_ENOMEM;
377 		goto free_sqe;
378 	}
379 	qp->cqe_dma = virt_to_phys(qp->cqe);
380 
381 	qp->qp_id = id;
382 	qp->qm = qm;
383 	return HISI_QM_DRVCRYPT_NO_ERR;
384 
385 free_sqe:
386 	free(qp->sqe);
387 	return ret;
388 }
389 
/* Free the SQC/CQC buffers allocated by hisi_qm_alloc_xqc(). */
static void hisi_qm_free_xqc(struct qm_xqc *xqc)
{
	free(xqc->cqc);
	free(xqc->sqc);
}
395 
/* Release everything allocated by qm_alloc(): all QP rings, the QP
 * array and both XQC buffer sets.
 */
static void qm_free(struct hisi_qm *qm)
{
	unsigned int i = 0;

	for (i = 0; i < qm->qp_num; i++)
		qp_free(qm, i);

	free(qm->qp_array);
	hisi_qm_free_xqc(&qm->xqc);
	hisi_qm_free_xqc(&qm->cfg_xqc);
}
407 
408 static enum hisi_drv_status hisi_qm_alloc_xqc(struct qm_xqc *xqc,
409 					      uint32_t qp_num)
410 {
411 	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;
412 	size_t sqc_size = 0;
413 	size_t cqc_size = 0;
414 
415 	sqc_size = sizeof(struct qm_sqc) * qp_num;
416 	cqc_size = sizeof(struct qm_cqc) * qp_num;
417 
418 	xqc->sqc = memalign(HISI_QM_ALIGN32, sqc_size);
419 	if (!xqc->sqc) {
420 		EMSG("Fail to malloc sqc");
421 		return HISI_QM_DRVCRYPT_ENOMEM;
422 	}
423 	memset(xqc->sqc, 0, sqc_size);
424 	xqc->sqc_dma = virt_to_phys(xqc->sqc);
425 
426 	xqc->cqc = memalign(HISI_QM_ALIGN32, cqc_size);
427 	if (!xqc->cqc) {
428 		EMSG("Fail to malloc cqc");
429 		ret = HISI_QM_DRVCRYPT_ENOMEM;
430 		goto free_sqc;
431 	}
432 	memset(xqc->cqc, 0, cqc_size);
433 	xqc->cqc_dma = virt_to_phys(xqc->cqc);
434 
435 	return HISI_QM_DRVCRYPT_NO_ERR;
436 
437 	free(xqc->cqc);
438 free_sqc:
439 	free(xqc->sqc);
440 	return ret;
441 }
442 
/*
 * Allocate all QM memory: the per-queue XQC arrays, the single-entry
 * configuration XQC used by the mailbox, the QP bookkeeping array and
 * each QP's rings. Unwinds fully on any failure.
 */
static enum hisi_drv_status qm_alloc(struct hisi_qm *qm)
{
	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;
	int32_t j;
	uint32_t i;

	ret = hisi_qm_alloc_xqc(&qm->xqc, qm->qp_num);
	if (ret)
		return ret;

	ret = hisi_qm_alloc_xqc(&qm->cfg_xqc, 1);
	if (ret)
		goto free_xqc;

	qm->qp_array = calloc(qm->qp_num, sizeof(struct hisi_qp));
	if (!qm->qp_array) {
		EMSG("Fail to malloc qp_array");
		ret = HISI_QM_DRVCRYPT_ENOMEM;
		goto free_cfg_xqc;
	}

	for (i = 0; i < qm->qp_num; i++) {
		ret = qp_alloc(qm, i);
		if (ret)
			goto free_qp_mem;
	}

	return HISI_QM_DRVCRYPT_NO_ERR;

free_qp_mem:
	/* Signed index so the loop terminates cleanly when i == 0 */
	for (j = (int)i - 1; j >= 0; j--)
		qp_free(qm, j);
	free(qm->qp_array);
free_cfg_xqc:
	hisi_qm_free_xqc(&qm->cfg_xqc);
free_xqc:
	hisi_qm_free_xqc(&qm->xqc);
	return ret;
}
482 
/*
 * Initialize the QM software state: for a VF, first read the queue
 * range assigned by the PF; then validate parameters, allocate all
 * queue memory and initialize the locks and counters.
 */
enum hisi_drv_status hisi_qm_init(struct hisi_qm *qm)
{
	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;

	if (qm->fun_type == HISI_QM_HW_VF) {
		ret = qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
		if (ret) {
			EMSG("Fail to get function vft config");
			return ret;
		}
	}

	if (!qm->qp_num || !qm->sqe_size) {
		EMSG("Invalid QM parameters");
		return HISI_QM_DRVCRYPT_EINVAL;
	}

	ret = qm_alloc(qm);
	if (ret)
		return ret;

	qm->qp_in_used = 0;
	qm->qp_idx = 0;
	mutex_init(&qm->qp_lock);
	mutex_init(&qm->mailbox_lock);

	return HISI_QM_DRVCRYPT_NO_ERR;
}
511 
/*
 * Trigger a hardware cache writeback and wait for completion.
 * Panics on timeout since continuing with stale context data would be
 * unsafe.
 */
static void qm_cache_writeback(struct hisi_qm *qm)
{
	uint32_t val = 0;

	io_write32(qm->io_base + QM_CACHE_WB_START, QM_FVT_CFG_RDY_BIT);

	if (IO_READ32_POLL_TIMEOUT(qm->io_base + QM_CACHE_WB_DONE, val,
				   val & QM_FVT_CFG_RDY_BIT, POLL_PERIOD,
				   POLL_TIMEOUT))
		panic("QM writeback sqc cache fail");
}
523 
/*
 * Tear down the QM: flush the hardware caches, release all memory and
 * destroy the locks. Counterpart of hisi_qm_init().
 */
void hisi_qm_uninit(struct hisi_qm *qm)
{
	qm_cache_writeback(qm);
	qm_free(qm);
	mutex_destroy(&qm->qp_lock);
	mutex_destroy(&qm->mailbox_lock);
}
531 
/*
 * Kick the hardware memory initialization and poll until the done bit
 * is set. Returns EBUSY if it does not complete in time.
 */
static enum hisi_drv_status qm_hw_mem_reset(struct hisi_qm *qm)
{
	uint32_t val = 0;

	io_write32(qm->io_base + QM_MEM_START_INIT, QM_FVT_CFG_RDY_BIT);

	if (IO_READ32_POLL_TIMEOUT(qm->io_base + QM_MEM_INIT_DONE, val,
				   val & QM_FVT_CFG_RDY_BIT, POLL_PERIOD,
				   POLL_TIMEOUT))
		return HISI_QM_DRVCRYPT_EBUSY;

	return HISI_QM_DRVCRYPT_NO_ERR;
}
545 
/*
 * Assign queue ranges to all virtual functions. VF queues start right
 * after the PF's qp_num queues; each VF gets HISI_QM_VF_Q_NUM queues.
 * On failure the already-configured VFs are rolled back to empty
 * ranges (best effort, return values ignored).
 */
static enum hisi_drv_status qm_func_vft_cfg(struct hisi_qm *qm)
{
	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;
	uint32_t q_base = qm->qp_num;
	uint32_t act_q_num = 0;
	unsigned int i = 0;
	unsigned int j = 0;

	if (!qm->vfs_num)
		return HISI_QM_DRVCRYPT_NO_ERR;

	if (qm->vfs_num > HISI_QM_MAX_VFS_NUM) {
		EMSG("Invalid QM vfs_num");
		return HISI_QM_DRVCRYPT_EINVAL;
	}

	/* Function index 0 is the PF, so VFs start at 1 */
	for (i = 1; i <= qm->vfs_num; i++) {
		act_q_num = HISI_QM_VF_Q_NUM;
		ret = qm_set_xqc_vft(qm, i, q_base, act_q_num);
		if (ret) {
			for (j = 1; j < i; j++)
				(void)qm_set_xqc_vft(qm, j, 0, 0);
			return ret;
		}
		q_base += act_q_num;
	}

	return HISI_QM_DRVCRYPT_NO_ERR;
}
575 
/*
 * Bring the QM up: on a PF, reset hardware memory and program the PF
 * and VF queue tables; then on any function, install the SQC/CQC base
 * tables via mailbox and mask the AEQ/EQ interrupts (polled operation
 * only in secure world).
 */
enum hisi_drv_status hisi_qm_start(struct hisi_qm *qm)
{
	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;

	if (qm->fun_type == HISI_QM_HW_PF) {
		ret = qm_hw_mem_reset(qm);
		if (ret) {
			EMSG("Fail to reset QM hardware mem");
			return ret;
		}

		ret = qm_set_xqc_vft(qm, 0, qm->qp_base, qm->qp_num);
		if (ret) {
			EMSG("Fail to set PF xqc_vft");
			return ret;
		}

		ret = qm_func_vft_cfg(qm);
		if (ret) {
			EMSG("Fail to set VF xqc_vft");
			return ret;
		}
	}

	ret = hisi_qm_mb_write(qm, QM_MB_CMD_SQC_BT, qm->xqc.sqc_dma, 0);
	if (ret) {
		EMSG("Fail to set sqc_bt");
		return ret;
	}

	ret = hisi_qm_mb_write(qm, QM_MB_CMD_CQC_BT, qm->xqc.cqc_dma, 0);
	if (ret) {
		EMSG("Fail to set cqc_bt");
		return ret;
	}

	/* Security mode does not support msi */
	io_write32(qm->io_base + QM_VF_AEQ_INT_MASK, QM_VF_AEQ_INT_MASK_EN);
	io_write32(qm->io_base + QM_VF_EQ_INT_MASK, QM_VF_EQ_INT_MASK_EN);

	return HISI_QM_DRVCRYPT_NO_ERR;
}
618 
619 static void qm_disable_clock_gate(struct hisi_qm *qm)
620 
621 {
622 	if (qm->version == HISI_QM_HW_V2)
623 		return;
624 
625 	io_setbits32(qm->io_base + QM_PM_CTRL0, QM_IDLE_DISABLE);
626 }
627 
/*
 * One-time PF device initialization: user-domain/AXI configuration,
 * cache setup, RAS masking and doorbell timeout. No-op for VFs, which
 * rely on the PF having done this.
 */
void hisi_qm_dev_init(struct hisi_qm *qm)
{
	if (qm->fun_type == HISI_QM_HW_VF)
		return;

	qm_disable_clock_gate(qm);

	/* QM user domain */
	io_write32(qm->io_base + QM_ARUSER_M_CFG_1, QM_AXUSER_CFG);
	io_write32(qm->io_base + QM_ARUSER_M_CFG_ENABLE, AXUSER_M_CFG_ENABLE);
	io_write32(qm->io_base + QM_AWUSER_M_CFG_1, QM_AXUSER_CFG);
	io_write32(qm->io_base + QM_AWUSER_M_CFG_ENABLE, AXUSER_M_CFG_ENABLE);
	/* QM cache */
	io_write32(qm->io_base + QM_AXI_M_CFG, AXI_M_CFG);

	if (qm->version == HISI_QM_HW_V2) {
		/* Disable FLR triggered by BME(bus master enable) */
		io_write32(qm->io_base + QM_PEH_AXUSER_CFG, PEH_AXUSER_CFG);
		/* Set sec sqc and cqc cache wb threshold 4 */
		io_write32(qm->io_base + QM_CACHE_CTL, QM_CACHE_CFG);
	}
	/* Disable QM ras */
	io_write32(qm->io_base + HISI_QM_ABNML_INT_MASK,
		   HISI_QM_ABNML_INT_MASK_CFG);
	/* Set doorbell timeout to QM_DB_TIMEOUT_SET ns */
	io_write32(qm->io_base + QM_DB_TIMEOUT_CFG, QM_DB_TIMEOUT_SET);
}
655 
/*
 * Program the SQ context of @qp into hardware via mailbox.
 * The mailbox lock also guards the shared cfg_xqc.sqc staging buffer,
 * so it is taken before the buffer is filled.
 */
static enum hisi_drv_status qm_sqc_cfg(struct hisi_qp *qp)
{
	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;
	struct hisi_qm *qm = qp->qm;
	struct qm_sqc *sqc = qm->cfg_xqc.sqc;
	struct qm_mailbox mb = { };

	mutex_lock(&qm->mailbox_lock);
	memset(sqc, 0, sizeof(struct qm_sqc));
	reg_pair_from_64(qp->sqe_dma, &sqc->base_h, &sqc->base_l);
	/* Queue depth (minus one) and log2 of the SQE size */
	sqc->dw3 = (HISI_QM_Q_DEPTH - 1) |
		    SHIFT_U32(qm->sqe_log2_size, QM_SQ_SQE_SIZE_SHIFT);
	sqc->rand_data = QM_DB_RAND_DATA;
	sqc->cq_num = qp->qp_id;
	sqc->w13 = BIT32(QM_SQ_ORDER_SHIFT) |
		   SHIFT_U32(qp->sq_type, QM_SQ_TYPE_SHIFT);

	qm_mb_init(&mb, QM_MB_CMD_SQC, qm->cfg_xqc.sqc_dma, qp->qp_id,
		   QM_MB_OP_WR);
	ret = qm_mb_nolock(qm, &mb);
	mutex_unlock(&qm->mailbox_lock);

	return ret;
}
680 
/*
 * Program the CQ context of @qp into hardware via mailbox.
 * As in qm_sqc_cfg(), the mailbox lock also protects the shared
 * cfg_xqc.cqc staging buffer.
 */
static enum hisi_drv_status qm_cqc_cfg(struct hisi_qp *qp)
{
	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;
	struct hisi_qm *qm = qp->qm;
	struct qm_cqc *cqc = qm->cfg_xqc.cqc;
	struct qm_mailbox mb = { };

	mutex_lock(&qm->mailbox_lock);
	memset(cqc, 0, sizeof(struct qm_cqc));
	reg_pair_from_64(qp->cqe_dma, &cqc->base_h, &cqc->base_l);
	/* Queue depth (minus one) and CQE size */
	cqc->dw3 = (HISI_QM_Q_DEPTH - 1) |
		    SHIFT_U32(QM_CQE_SIZE, QM_CQ_CQE_SIZE_SHIFT);
	cqc->rand_data = QM_DB_RAND_DATA;
	/* Initial phase value matches qp->cqc_phase = true */
	cqc->dw6 = PHASE_DEFAULT_VAL;

	qm_mb_init(&mb, QM_MB_CMD_CQC, qm->cfg_xqc.cqc_dma, qp->qp_id,
		   QM_MB_OP_WR);
	ret = qm_mb_nolock(qm, &mb);
	mutex_unlock(&qm->mailbox_lock);

	return ret;
}
703 
704 struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, uint8_t sq_type)
705 {
706 	struct hisi_qp *qp = NULL;
707 	int cur_idx = 0;
708 	uint32_t i = 0;
709 
710 	mutex_lock(&qm->qp_lock);
711 	if (qm->qp_in_used == qm->qp_num) {
712 		EMSG("All %"PRIu32" queues of QM are busy", qm->qp_num);
713 		goto err_proc;
714 	}
715 
716 	for (i = 0; i < qm->qp_num; i++) {
717 		cur_idx = (qm->qp_idx + i) % qm->qp_num;
718 		if (!qm->qp_array[cur_idx].used) {
719 			qm->qp_array[cur_idx].used = true;
720 			qm->qp_idx = cur_idx + 1;
721 			break;
722 		}
723 	}
724 
725 	qp = qm->qp_array + cur_idx;
726 	memset(qp->cqe, 0, sizeof(struct qm_cqe) * HISI_QM_Q_DEPTH);
727 	qp->sq_type = sq_type;
728 	qp->sq_tail = 0;
729 	qp->cq_head = 0;
730 	qp->cqc_phase = true;
731 
732 	if (qm_sqc_cfg(qp)) {
733 		EMSG("Fail to set qp[%"PRIu32"] sqc", qp->qp_id);
734 		goto err_qp_release;
735 	}
736 
737 	if (qm_cqc_cfg(qp)) {
738 		EMSG("Fail to set qp[%"PRIu32"] cqc", qp->qp_id);
739 		goto err_qp_release;
740 	}
741 
742 	qm->qp_in_used++;
743 	mutex_unlock(&qm->qp_lock);
744 	return qp;
745 
746 err_qp_release:
747 	qp->used = false;
748 err_proc:
749 	qp->sq_type = 0;
750 	qp->cqc_phase = false;
751 	mutex_unlock(&qm->qp_lock);
752 	return NULL;
753 }
754 
755 void hisi_qm_release_qp(struct hisi_qp *qp)
756 {
757 	struct hisi_qm *qm = NULL;
758 
759 	if (!qp) {
760 		EMSG("QP is NULL");
761 		return;
762 	}
763 
764 	qm = qp->qm;
765 	mutex_lock(&qm->qp_lock);
766 	qm->qp_in_used--;
767 	qp->used = false;
768 	mutex_unlock(&qm->qp_lock);
769 }
770 
771 static void qm_sq_tail_update(struct hisi_qp *qp)
772 {
773 	if (qp->sq_tail == HISI_QM_Q_DEPTH - 1)
774 		qp->sq_tail = 0;
775 	else
776 		qp->sq_tail++;
777 }
778 
/*
 * One task thread will just bind to one hardware queue, and
 * hardware does not support msi. So we have no lock here.
 */
/*
 * Submit one request on @qp: build the SQE at the current tail slot
 * via qp->fill_sqe(), advance the tail and ring the SQ doorbell.
 * Returns an error if the device check or SQE fill fails.
 */
enum hisi_drv_status hisi_qp_send(struct hisi_qp *qp, void *msg)
{
	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;
	struct hisi_qm *qm = NULL;
	void *sqe = NULL;

	if (!qp) {
		EMSG("QP is NULL");
		return HISI_QM_DRVCRYPT_EINVAL;
	}

	qm = qp->qm;
	ret = qm->dev_status_check(qm);
	if (ret)
		return ret;

	/* Slot of the current tail entry in the SQE ring */
	sqe = (void *)((vaddr_t)qp->sqe + qm->sqe_size * qp->sq_tail);
	memset(sqe, 0, qm->sqe_size);

	ret = qp->fill_sqe(sqe, msg);
	if (ret) {
		EMSG("Fail to fill sqe");
		return ret;
	}

	qm_sq_tail_update(qp);

	/* Make the SQE visible to the device before ringing the doorbell */
	dsb();
	/* The doorbell carries the new (post-increment) tail index */
	qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_SQ, qp->sq_tail, 0);

	return HISI_QM_DRVCRYPT_NO_ERR;
}
815 
816 static void qm_cq_head_update(struct hisi_qp *qp)
817 {
818 	if (qp->cq_head == HISI_QM_Q_DEPTH - 1) {
819 		qp->cqc_phase = !qp->cqc_phase;
820 		qp->cq_head = 0;
821 	} else {
822 		qp->cq_head++;
823 	}
824 }
825 
/*
 * Poll @qp once for a completion. A CQE whose phase bit matches the
 * expected qp->cqc_phase is new; its sq_head points at the SQE to
 * parse with qp->parse_sqe(). Returns RECV_DONE when one completion
 * was consumed, NO_ERR when nothing is pending yet, or an error code.
 */
static enum hisi_drv_status hisi_qp_recv(struct hisi_qp *qp, void *msg)
{
	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;
	struct hisi_qm *qm = qp->qm;
	struct qm_cqe *cqe = NULL;
	void *sqe = NULL;

	ret = qm->dev_status_check(qm);
	if (ret)
		return ret;

	cqe = qp->cqe + qp->cq_head;
	if (QM_CQE_PHASE(cqe) == qp->cqc_phase) {
		/* Ensure CQE contents are read after the phase check */
		dsb_osh();
		sqe = (void *)((vaddr_t)qp->sqe + qm->sqe_size * cqe->sq_head);
		ret = qp->parse_sqe(sqe, msg);
		qm_cq_head_update(qp);
		/* Acknowledge the consumed CQE to hardware */
		qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, qp->cq_head, 0);
		if (ret) {
			EMSG("Fail to parse sqe");
			return ret;
		}
	} else {
		return HISI_QM_DRVCRYPT_NO_ERR;
	}

	return HISI_QM_DRVCRYPT_RECV_DONE;
}
854 
855 static void qm_dfx_dump(struct hisi_qm *qm)
856 {
857 	const struct qm_dfx_registers *regs = qm_dfx_regs;
858 	__maybe_unused uint32_t val = 0;
859 
860 	if (qm->fun_type == HISI_QM_HW_VF)
861 		return;
862 
863 	while (regs->reg_name) {
864 		val = io_read32(qm->io_base + regs->reg_offset);
865 		EMSG("%s= 0x%" PRIx32, regs->reg_name, val);
866 		regs++;
867 	}
868 }
869 
870 enum hisi_drv_status hisi_qp_recv_sync(struct hisi_qp *qp, void *msg)
871 {
872 	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;
873 	uint32_t timeout = 0;
874 
875 	if (!qp || !qp->qm || !msg) {
876 		EMSG("Invalid qp recv sync parameters");
877 		return HISI_QM_DRVCRYPT_EINVAL;
878 	}
879 
880 	timeout = timeout_init_us(QM_SINGLE_WAIT_TIME *
881 				  HISI_QM_RECV_SYNC_TIMEOUT);
882 	while (!timeout_elapsed(timeout)) {
883 		ret = hisi_qp_recv(qp, msg);
884 		if (ret) {
885 			if (ret != HISI_QM_DRVCRYPT_RECV_DONE) {
886 				EMSG("QM recv task error");
887 				qm_dfx_dump(qp->qm);
888 				return ret;
889 			} else {
890 				return HISI_QM_DRVCRYPT_NO_ERR;
891 			}
892 		}
893 	}
894 
895 	EMSG("QM recv task timeout");
896 	qm_dfx_dump(qp->qm);
897 	return HISI_QM_DRVCRYPT_ETMOUT;
898 }
899