xref: /optee_os/core/drivers/crypto/hisilicon/hisi_qm.c (revision 5f7f88c6b9d618d1e068166bbf2b07757350791d)
1 // SPDX-License-Identifier: BSD-2-Clause
2 /*
3  * Copyright 2022-2023 HiSilicon Limited.
4  * Kunpeng hardware accelerator queue management module.
5  */
6 #include "hisi_qm.h"
7 
/* Generic "operation ready/done" bit used by several QM status registers */
#define QM_FVT_CFG_RDY_BIT	0x1
/* Doorbell */
#define QM_DOORBELL_SQ_CQ_BASE	0x1000
#define QM_DB_CMD_SHIFT		12
#define QM_DB_RAND_DATA_SHIFT	16
#define QM_DB_INDEX_SHIFT	32
#define QM_DB_PRIORITY_SHIFT	48
#define QM_DB_RAND_DATA		0x5a
#define QM_DOORBELL_CMD_SQ	0
#define QM_DOORBELL_CMD_CQ	1
/* Mailbox */
#define QM_MAILBOX_BASE		0x300
#define QM_MAILBOX_DATA_ADDR_L	0x304
#define QM_MAILBOX_DATA_ADDR_H	0x308
#define QM_MB_BUSY_SHIFT	13
#define QM_MB_BUSY_BIT		BIT32(QM_MB_BUSY_SHIFT)
#define QM_MB_OP_SHIFT		14
#define QM_MB_OP_WR		0
#define QM_MB_OP_RD		1
#define QM_MB_STATUS_MASK	GENMASK_32(12, 9)
/* Poll counts multiplied by QM_MB_WAIT_PERIOD (us) to build a timeout */
#define QM_MB_WAIT_READY_CNT	10
#define QM_MB_WAIT_MAX_CNT	21000
#define QM_MB_WAIT_PERIOD	200
/* XQC_VFT */
#define QM_VFT_CFG_OP_ENABLE	0x100054
#define QM_VFT_CFG_OP_WR	0x100058
#define QM_VFT_CFG_TYPE		0x10005c
#define QM_VFT_CFG_ADDRESS	0x100060
#define QM_VFT_CFG_DATA_L	0x100064
#define QM_VFT_CFG_DATA_H	0x100068
#define QM_VFT_CFG_RDY		0x10006c
#define QM_SQC_VFT		0
#define QM_CQC_VFT		1
#define QM_SQC_VFT_START_SQN_SHIFT 28
#define QM_SQC_VFT_VALID	BIT64(44)
#define QM_SQC_VFT_SQ_NUM_SHIFT 45
#define QM_CQC_VFT_VALID	BIT(28)
#define QM_VFT_WRITE		0
#define QM_VFT_READ		1
#define QM_SQC_VFT_BASE_MASK	0x3ff
#define QM_SQC_VFT_NUM_MASK	0x3ff
/* QM INIT */
#define QM_MEM_START_INIT	0x100040
#define QM_MEM_INIT_DONE	0x100044
#define QM_VF_AEQ_INT_MASK	0x4
#define QM_VF_AEQ_INT_MASK_EN	0x1
#define QM_VF_EQ_INT_MASK	0xc
#define QM_VF_EQ_INT_MASK_EN	0x1
#define QM_ARUSER_M_CFG_1	0x100088
#define QM_ARUSER_M_CFG_ENABLE	0x100090
#define QM_AWUSER_M_CFG_1	0x100098
#define QM_AWUSER_M_CFG_ENABLE	0x1000a0
#define QM_AXUSER_CFG		0x40001070
#define AXUSER_M_CFG_ENABLE	0x7ffffc
#define QM_AXI_M_CFG		0x1000ac
#define AXI_M_CFG		0xffff
#define QM_PEH_AXUSER_CFG	0x1000cc
#define PEH_AXUSER_CFG		0x400801
#define QM_CACHE_CTL		0x100050
#define QM_CACHE_CFG		0x4893
#define QM_CACHE_WB_START	0x204
#define QM_CACHE_WB_DONE	0x208
/* XQC shift */
#define QM_SQ_SQE_SIZE_SHIFT	12
#define QM_SQ_ORDER_SHIFT	4
#define QM_SQ_TYPE_SHIFT	8
#define QM_CQE_SIZE		4
#define QM_CQ_CQE_SIZE_SHIFT	12
/* CQE phase bit: toggles each time the queue wraps (see qm_cq_head_update()) */
#define QM_CQE_PHASE(cqe) (((cqe)->w7) & QM_FVT_CFG_RDY_BIT)
78 
/*
 * Mailbox commands common to all QM hardware versions. SQC/CQC configure
 * per-queue contexts (see qm_sqc_cfg()/qm_cqc_cfg()); the *_BT commands set
 * the base tables (see hisi_qm_start()); SQC_VFT reads the queue range
 * assigned to this function (see qm_get_vft()).
 */
enum qm_mailbox_common_cmd {
	QM_MB_CMD_SQC = 0x0,
	QM_MB_CMD_CQC,
	QM_MB_CMD_EQC,
	QM_MB_CMD_AEQC,
	QM_MB_CMD_SQC_BT,
	QM_MB_CMD_CQC_BT,
	QM_MB_CMD_SQC_VFT,
};
88 
/* Mailbox commands only available on v3 hardware (not used in this file) */
enum qm_mailbox_cmd_v3 {
	/*
	 * NOTE(review): looks like a typo for QM_MB_CMD_CLOSE_QM — verify no
	 * other translation unit references this name before renaming.
	 */
	QM_MB_CM_CLOSE_QM = 0x7,
	QM_MB_CMD_CLOSE_QP,
	QM_MB_CMD_FLUSH_QM,
	QM_MB_CMD_FLUSH_QP,
	QM_MB_CMD_SRC = 0xc,
	QM_MB_CMD_DST,
	QM_MB_CMD_STOP_QM,
};
98 
/*
 * 128-bit mailbox message. The union lets the message be moved to/from the
 * hardware as two 64-bit halves (x[0]/x[1], see qm_mb_write()/qm_mb_read())
 * while fields are filled individually (see qm_mb_init()).
 */
struct qm_mailbox {
	union {
		struct {
			uint16_t w0;	/* cmd | op | busy bit | status */
			uint16_t queue;	/* target queue number */
			uint32_t base_l;	/* DMA address, low 32 bits */
			uint32_t base_h;	/* DMA address, high 32 bits */
			uint32_t token;
		};
		uint64_t x[2];
	};
};
111 
/* Name/offset pair for one debug (DFX) counter register */
struct qm_dfx_registers {
	const char *reg_name;
	uint32_t reg_offset;
};
116 
/*
 * DFX counters dumped on errors by qm_dfx_dump(). The empty initializer at
 * the end is the sentinel: iteration stops at the NULL reg_name.
 */
static const struct qm_dfx_registers qm_dfx_regs[] = {
	{ .reg_name = "QM_ECC_1BIT_CNT           ", .reg_offset = 0x104000 },
	{ .reg_name = "QM_ECC_MBIT_CNT           ", .reg_offset = 0x104008 },
	{ .reg_name = "QM_DFX_MB_CNT             ", .reg_offset = 0x104018 },
	{ .reg_name = "QM_DFX_DB_CNT             ", .reg_offset = 0x104028 },
	{ .reg_name = "QM_DFX_SQE_CNT            ", .reg_offset = 0x104038 },
	{ .reg_name = "QM_DFX_CQE_CNT            ", .reg_offset = 0x104048 },
	{ .reg_name = "QM_DFX_SEND_SQE_TO_ACC_CNT", .reg_offset = 0x104050 },
	{ .reg_name = "QM_DFX_WB_SQE_FROM_ACC_CNT", .reg_offset = 0x104058 },
	{ .reg_name = "QM_DFX_ACC_FINISH_CNT     ", .reg_offset = 0x104060 },
	{ .reg_name = "QM_DFX_CQE_ERR_CNT        ", .reg_offset = 0x1040b4 },
	{ }
};
130 
131 void hisi_qm_get_version(struct hisi_qm *qm)
132 {
133 	qm->version = io_read32(qm->io_base + HISI_QM_REVISON_ID_BASE) &
134 		      HISI_QM_REVISON_ID_MASK;
135 }
136 
137 static void qm_db(struct hisi_qm *qm, uint16_t qn, uint8_t cmd, uint16_t index,
138 		  uint8_t priority)
139 {
140 	uint64_t doorbell = 0;
141 
142 	doorbell = qn | SHIFT_U64(cmd, QM_DB_CMD_SHIFT) |
143 		   SHIFT_U64(QM_DB_RAND_DATA, QM_DB_RAND_DATA_SHIFT) |
144 		   SHIFT_U64(index, QM_DB_INDEX_SHIFT) |
145 		   SHIFT_U64(priority, QM_DB_PRIORITY_SHIFT);
146 
147 	io_write64(qm->io_base + QM_DOORBELL_SQ_CQ_BASE, doorbell);
148 }
149 
150 static void qm_mb_write(struct hisi_qm *qm, struct qm_mailbox *mb)
151 {
152 	vaddr_t dst = qm->io_base + QM_MAILBOX_BASE;
153 
154 	write_64bit_pair(dst, mb->x[1], mb->x[0]);
155 	dsb_osh();
156 }
157 
158 static void qm_mb_read(struct hisi_qm *qm, struct qm_mailbox *mb)
159 {
160 	vaddr_t mb_base = qm->io_base + QM_MAILBOX_BASE;
161 
162 	read_64bit_pair(mb_base, mb->x + 1, mb->x);
163 	dsb_osh();
164 }
165 
166 static enum hisi_drv_status qm_wait_mb_ready(struct hisi_qm *qm)
167 {
168 	struct qm_mailbox mb = { };
169 	uint32_t timeout = 0;
170 
171 	timeout = timeout_init_us(QM_MB_WAIT_PERIOD * QM_MB_WAIT_READY_CNT);
172 	while (!timeout_elapsed(timeout)) {
173 		/* 128 bits should be read from hardware at one time*/
174 		qm_mb_read(qm, &mb);
175 		if (!(mb.w0 & QM_MB_BUSY_BIT))
176 			return HISI_QM_DRVCRYPT_NO_ERR;
177 	}
178 
179 	EMSG("QM mailbox is busy to start!");
180 
181 	return HISI_QM_DRVCRYPT_EBUSY;
182 }
183 
184 static enum hisi_drv_status qm_wait_mb_finish(struct hisi_qm *qm,
185 					      struct qm_mailbox *mb)
186 {
187 	uint32_t timeout = 0;
188 
189 	timeout = timeout_init_us(QM_MB_WAIT_PERIOD * QM_MB_WAIT_MAX_CNT);
190 	while (!timeout_elapsed(timeout)) {
191 		qm_mb_read(qm, mb);
192 		if (!(mb->w0 & QM_MB_BUSY_BIT)) {
193 			if (mb->w0 & QM_MB_STATUS_MASK) {
194 				EMSG("QM mailbox operation failed!");
195 				return HISI_QM_DRVCRYPT_EIO;
196 			} else {
197 				return HISI_QM_DRVCRYPT_NO_ERR;
198 			}
199 		}
200 	}
201 
202 	return HISI_QM_DRVCRYPT_ETMOUT;
203 }
204 
205 static void qm_mb_init(struct qm_mailbox *mb, uint8_t cmd, uint64_t base,
206 		       uint16_t qnum, uint8_t op)
207 {
208 	mb->w0 = cmd | SHIFT_U32(op, QM_MB_OP_SHIFT) |  QM_MB_BUSY_BIT;
209 	mb->queue = qnum;
210 	reg_pair_from_64(base, &mb->base_h, &mb->base_l);
211 	mb->token = 0;
212 }
213 
214 static enum hisi_drv_status qm_mb_nolock(struct hisi_qm *qm,
215 					 struct qm_mailbox *mb)
216 {
217 	if (qm_wait_mb_ready(qm))
218 		return HISI_QM_DRVCRYPT_EBUSY;
219 
220 	qm_mb_write(qm, mb);
221 
222 	return qm_wait_mb_finish(qm, mb);
223 }
224 
225 static enum hisi_drv_status hisi_qm_mb_write(struct hisi_qm *qm, uint8_t cmd,
226 					     uintptr_t dma_addr, uint16_t qnum)
227 {
228 	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;
229 	struct qm_mailbox mb = { };
230 
231 	qm_mb_init(&mb, cmd, dma_addr, qnum, QM_MB_OP_WR);
232 	mutex_lock(&qm->mailbox_lock);
233 	ret = qm_mb_nolock(qm, &mb);
234 	mutex_unlock(&qm->mailbox_lock);
235 
236 	return ret;
237 }
238 
239 static enum hisi_drv_status hisi_qm_mb_read(struct hisi_qm *qm, uint64_t *base,
240 					    uint8_t cmd, uint16_t qnum)
241 {
242 	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;
243 	struct qm_mailbox mb = { };
244 
245 	qm_mb_init(&mb, cmd, 0, qnum, QM_MB_OP_RD);
246 	mutex_lock(&qm->mailbox_lock);
247 	ret = qm_mb_nolock(qm, &mb);
248 	mutex_unlock(&qm->mailbox_lock);
249 	if (ret)
250 		return ret;
251 
252 	reg_pair_from_64(*base, &mb.base_h, &mb.base_l);
253 
254 	return HISI_QM_DRVCRYPT_NO_ERR;
255 }
256 
257 static void qm_cfg_vft_data(struct hisi_qm *qm, uint8_t vft_type,
258 			    uint32_t base, uint32_t number)
259 {
260 	uint32_t data_h = 0;
261 	uint32_t data_l = 0;
262 	uint64_t data = 0;
263 
264 	switch (vft_type) {
265 	case QM_SQC_VFT:
266 		data = SHIFT_U64(base, QM_SQC_VFT_START_SQN_SHIFT) |
267 			QM_SQC_VFT_VALID |
268 			SHIFT_U64((number - 1), QM_SQC_VFT_SQ_NUM_SHIFT);
269 		break;
270 	case QM_CQC_VFT:
271 		data = QM_CQC_VFT_VALID;
272 		break;
273 	default:
274 		panic("Invalid vft type");
275 	}
276 
277 	reg_pair_from_64(data, &data_h, &data_l);
278 	io_write32(qm->io_base + QM_VFT_CFG_DATA_L, data_l);
279 	io_write32(qm->io_base + QM_VFT_CFG_DATA_H, data_h);
280 }
281 
282 static enum hisi_drv_status qm_set_vft_common(struct hisi_qm *qm,
283 					      uint8_t vft_type,
284 					      uint32_t function,
285 					      uint32_t base,
286 					      uint32_t num)
287 {
288 	uint32_t val = 0;
289 
290 	if (IO_READ32_POLL_TIMEOUT(qm->io_base + QM_VFT_CFG_RDY, val,
291 				   val & QM_FVT_CFG_RDY_BIT, POLL_PERIOD,
292 				   POLL_TIMEOUT)) {
293 		EMSG("QM VFT is not ready");
294 		return HISI_QM_DRVCRYPT_EBUSY;
295 	}
296 
297 	io_write32(qm->io_base + QM_VFT_CFG_OP_WR, QM_VFT_WRITE);
298 	io_write32(qm->io_base + QM_VFT_CFG_TYPE, vft_type);
299 	io_write32(qm->io_base + QM_VFT_CFG_ADDRESS, function);
300 	qm_cfg_vft_data(qm, vft_type, base, num);
301 	io_write32(qm->io_base + QM_VFT_CFG_RDY, 0x0);
302 	io_write32(qm->io_base + QM_VFT_CFG_OP_ENABLE, QM_FVT_CFG_RDY_BIT);
303 
304 	if (IO_READ32_POLL_TIMEOUT(qm->io_base + QM_VFT_CFG_RDY, val,
305 				   val & QM_FVT_CFG_RDY_BIT, POLL_PERIOD,
306 				   POLL_TIMEOUT)) {
307 		EMSG("QM VFT is not ready");
308 		return HISI_QM_DRVCRYPT_EBUSY;
309 	}
310 
311 	return HISI_QM_DRVCRYPT_NO_ERR;
312 }
313 
314 static enum hisi_drv_status qm_set_xqc_vft(struct hisi_qm *qm,
315 					   uint32_t function,
316 					   uint32_t base, uint32_t num)
317 {
318 	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;
319 	int i = 0;
320 
321 	if (!num) {
322 		EMSG("Invalid sq num");
323 		return HISI_QM_DRVCRYPT_EINVAL;
324 	}
325 
326 	for (i = QM_SQC_VFT; i <= QM_CQC_VFT; i++) {
327 		ret = qm_set_vft_common(qm, i, function, base, num);
328 		if (ret) {
329 			EMSG("QM set type %d fail", i);
330 			return ret;
331 		}
332 	}
333 
334 	return HISI_QM_DRVCRYPT_NO_ERR;
335 }
336 
337 static enum hisi_drv_status qm_get_vft(struct hisi_qm *qm, uint32_t *base,
338 				       uint32_t *num)
339 {
340 	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;
341 	uint64_t sqc_vft = 0;
342 
343 	ret = hisi_qm_mb_read(qm, &sqc_vft, QM_MB_CMD_SQC_VFT, 0);
344 	if (ret)
345 		return ret;
346 
347 	*base = (sqc_vft >> QM_SQC_VFT_START_SQN_SHIFT) & QM_SQC_VFT_BASE_MASK;
348 	*num = ((sqc_vft >> QM_SQC_VFT_SQ_NUM_SHIFT) & QM_SQC_VFT_NUM_MASK) + 1;
349 
350 	return HISI_QM_DRVCRYPT_NO_ERR;
351 }
352 
353 static void qp_free(struct hisi_qm *qm, uint32_t id)
354 {
355 	struct hisi_qp *qp = &qm->qp_array[id];
356 
357 	free(qp->sqe);
358 	free(qp->cqe);
359 }
360 
361 static enum hisi_drv_status qp_alloc(struct hisi_qm *qm, uint32_t id)
362 {
363 	size_t sq_size = qm->sqe_size * HISI_QM_Q_DEPTH;
364 	size_t cq_size = sizeof(struct qm_cqe) * HISI_QM_Q_DEPTH;
365 	struct hisi_qp *qp = &qm->qp_array[id];
366 	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;
367 
368 	qp->sqe = memalign(HISI_QM_ALIGN128, sq_size);
369 	if (!qp->sqe) {
370 		EMSG("Fail to malloc sq[%"PRIu32"]", id);
371 		return HISI_QM_DRVCRYPT_ENOMEM;
372 	}
373 	qp->sqe_dma = virt_to_phys(qp->sqe);
374 	qp->cqe = memalign(HISI_QM_ALIGN32, cq_size);
375 	if (!qp->cqe) {
376 		EMSG("Fail to malloc cq[%"PRIu32"]", id);
377 		ret = HISI_QM_DRVCRYPT_ENOMEM;
378 		goto free_sqe;
379 	}
380 	qp->cqe_dma = virt_to_phys(qp->cqe);
381 
382 	qp->qp_id = id;
383 	qp->qm = qm;
384 	return HISI_QM_DRVCRYPT_NO_ERR;
385 
386 free_sqe:
387 	free(qp->sqe);
388 	return ret;
389 }
390 
391 static void hisi_qm_free_xqc(struct qm_xqc *xqc)
392 {
393 	free(xqc->cqc);
394 	free(xqc->sqc);
395 }
396 
397 static void qm_free(struct hisi_qm *qm)
398 {
399 	unsigned int i = 0;
400 
401 	for (i = 0; i < qm->qp_num; i++)
402 		qp_free(qm, i);
403 
404 	free(qm->qp_array);
405 	hisi_qm_free_xqc(&qm->xqc);
406 	hisi_qm_free_xqc(&qm->cfg_xqc);
407 }
408 
409 static enum hisi_drv_status hisi_qm_alloc_xqc(struct qm_xqc *xqc,
410 					      uint32_t qp_num)
411 {
412 	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;
413 	size_t sqc_size = 0;
414 	size_t cqc_size = 0;
415 
416 	sqc_size = sizeof(struct qm_sqc) * qp_num;
417 	cqc_size = sizeof(struct qm_cqc) * qp_num;
418 
419 	xqc->sqc = memalign(HISI_QM_ALIGN32, sqc_size);
420 	if (!xqc->sqc) {
421 		EMSG("Fail to malloc sqc");
422 		return HISI_QM_DRVCRYPT_ENOMEM;
423 	}
424 	memset(xqc->sqc, 0, sqc_size);
425 	xqc->sqc_dma = virt_to_phys(xqc->sqc);
426 
427 	xqc->cqc = memalign(HISI_QM_ALIGN32, cqc_size);
428 	if (!xqc->cqc) {
429 		EMSG("Fail to malloc cqc");
430 		ret = HISI_QM_DRVCRYPT_ENOMEM;
431 		goto free_sqc;
432 	}
433 	memset(xqc->cqc, 0, cqc_size);
434 	xqc->cqc_dma = virt_to_phys(xqc->cqc);
435 
436 	return HISI_QM_DRVCRYPT_NO_ERR;
437 
438 	free(xqc->cqc);
439 free_sqc:
440 	free(xqc->sqc);
441 	return ret;
442 }
443 
444 static enum hisi_drv_status qm_alloc(struct hisi_qm *qm)
445 {
446 	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;
447 	int32_t j;
448 	uint32_t i;
449 
450 	ret = hisi_qm_alloc_xqc(&qm->xqc, qm->qp_num);
451 	if (ret)
452 		return ret;
453 
454 	ret = hisi_qm_alloc_xqc(&qm->cfg_xqc, 1);
455 	if (ret)
456 		goto free_xqc;
457 
458 	qm->qp_array = calloc(qm->qp_num, sizeof(struct hisi_qp));
459 	if (!qm->qp_array) {
460 		EMSG("Fail to malloc qp_array");
461 		ret = HISI_QM_DRVCRYPT_ENOMEM;
462 		goto free_cfg_xqc;
463 	}
464 
465 	for (i = 0; i < qm->qp_num; i++) {
466 		ret = qp_alloc(qm, i);
467 		if (ret)
468 			goto free_qp_mem;
469 	}
470 
471 	return HISI_QM_DRVCRYPT_NO_ERR;
472 
473 free_qp_mem:
474 	for (j = (int)i - 1; j >= 0; j--)
475 		qp_free(qm, j);
476 	free(qm->qp_array);
477 free_cfg_xqc:
478 	hisi_qm_free_xqc(&qm->cfg_xqc);
479 free_xqc:
480 	hisi_qm_free_xqc(&qm->xqc);
481 	return ret;
482 }
483 
484 enum hisi_drv_status hisi_qm_init(struct hisi_qm *qm)
485 {
486 	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;
487 
488 	if (qm->fun_type == HISI_QM_HW_VF) {
489 		ret = qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
490 		if (ret) {
491 			EMSG("Fail to get function vft config");
492 			return ret;
493 		}
494 	}
495 
496 	if (!qm->qp_num || !qm->sqe_size) {
497 		EMSG("Invalid QM parameters");
498 		return HISI_QM_DRVCRYPT_EINVAL;
499 	}
500 
501 	ret = qm_alloc(qm);
502 	if (ret)
503 		return ret;
504 
505 	qm->qp_in_used = 0;
506 	qm->qp_idx = 0;
507 	mutex_init(&qm->qp_lock);
508 	mutex_init(&qm->mailbox_lock);
509 
510 	return HISI_QM_DRVCRYPT_NO_ERR;
511 }
512 
513 static void qm_cache_writeback(struct hisi_qm *qm)
514 {
515 	uint32_t val = 0;
516 
517 	io_write32(qm->io_base + QM_CACHE_WB_START, QM_FVT_CFG_RDY_BIT);
518 
519 	if (IO_READ32_POLL_TIMEOUT(qm->io_base + QM_CACHE_WB_DONE, val,
520 				   val & QM_FVT_CFG_RDY_BIT, POLL_PERIOD,
521 				   POLL_TIMEOUT))
522 		panic("QM writeback sqc cache fail");
523 }
524 
525 void hisi_qm_uninit(struct hisi_qm *qm)
526 {
527 	qm_cache_writeback(qm);
528 	qm_free(qm);
529 	mutex_destroy(&qm->qp_lock);
530 	mutex_destroy(&qm->mailbox_lock);
531 }
532 
533 static enum hisi_drv_status qm_hw_mem_reset(struct hisi_qm *qm)
534 {
535 	uint32_t val = 0;
536 
537 	io_write32(qm->io_base + QM_MEM_START_INIT, QM_FVT_CFG_RDY_BIT);
538 
539 	if (IO_READ32_POLL_TIMEOUT(qm->io_base + QM_MEM_INIT_DONE, val,
540 				   val & QM_FVT_CFG_RDY_BIT, POLL_PERIOD,
541 				   POLL_TIMEOUT))
542 		return HISI_QM_DRVCRYPT_EBUSY;
543 
544 	return HISI_QM_DRVCRYPT_NO_ERR;
545 }
546 
547 static enum hisi_drv_status qm_func_vft_cfg(struct hisi_qm *qm)
548 {
549 	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;
550 	uint32_t q_base = qm->qp_num;
551 	uint32_t act_q_num = 0;
552 	unsigned int i = 0;
553 	unsigned int j = 0;
554 
555 	if (!qm->vfs_num)
556 		return HISI_QM_DRVCRYPT_NO_ERR;
557 
558 	if (qm->vfs_num > HISI_QM_MAX_VFS_NUM) {
559 		EMSG("Invalid QM vfs_num");
560 		return HISI_QM_DRVCRYPT_EINVAL;
561 	}
562 
563 	for (i = 1; i <= qm->vfs_num; i++) {
564 		act_q_num = HISI_QM_VF_Q_NUM;
565 		ret = qm_set_xqc_vft(qm, i, q_base, act_q_num);
566 		if (ret) {
567 			for (j = 1; j < i; j++)
568 				(void)qm_set_xqc_vft(qm, j, 0, 0);
569 			return ret;
570 		}
571 		q_base += act_q_num;
572 	}
573 
574 	return HISI_QM_DRVCRYPT_NO_ERR;
575 }
576 
577 enum hisi_drv_status hisi_qm_start(struct hisi_qm *qm)
578 {
579 	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;
580 
581 	if (qm->fun_type == HISI_QM_HW_PF) {
582 		ret = qm_hw_mem_reset(qm);
583 		if (ret) {
584 			EMSG("Fail to reset QM hardware mem");
585 			return ret;
586 		}
587 
588 		ret = qm_set_xqc_vft(qm, 0, qm->qp_base, qm->qp_num);
589 		if (ret) {
590 			EMSG("Fail to set PF xqc_vft");
591 			return ret;
592 		}
593 
594 		ret = qm_func_vft_cfg(qm);
595 		if (ret) {
596 			EMSG("Fail to set VF xqc_vft");
597 			return ret;
598 		}
599 	}
600 
601 	ret = hisi_qm_mb_write(qm, QM_MB_CMD_SQC_BT, qm->xqc.sqc_dma, 0);
602 	if (ret) {
603 		EMSG("Fail to set sqc_bt");
604 		return ret;
605 	}
606 
607 	ret = hisi_qm_mb_write(qm, QM_MB_CMD_CQC_BT, qm->xqc.cqc_dma, 0);
608 	if (ret) {
609 		EMSG("Fail to set cqc_bt");
610 		return ret;
611 	}
612 
613 	/* Security mode does not support msi */
614 	io_write32(qm->io_base + QM_VF_AEQ_INT_MASK, QM_VF_AEQ_INT_MASK_EN);
615 	io_write32(qm->io_base + QM_VF_EQ_INT_MASK, QM_VF_EQ_INT_MASK_EN);
616 
617 	return HISI_QM_DRVCRYPT_NO_ERR;
618 }
619 
620 void hisi_qm_dev_init(struct hisi_qm *qm)
621 {
622 	if (qm->fun_type == HISI_QM_HW_VF)
623 		return;
624 
625 	/* QM user domain */
626 	io_write32(qm->io_base + QM_ARUSER_M_CFG_1, QM_AXUSER_CFG);
627 	io_write32(qm->io_base + QM_ARUSER_M_CFG_ENABLE, AXUSER_M_CFG_ENABLE);
628 	io_write32(qm->io_base + QM_AWUSER_M_CFG_1, QM_AXUSER_CFG);
629 	io_write32(qm->io_base + QM_AWUSER_M_CFG_ENABLE, AXUSER_M_CFG_ENABLE);
630 	/* QM cache */
631 	io_write32(qm->io_base + QM_AXI_M_CFG, AXI_M_CFG);
632 
633 	if (qm->version == HISI_QM_HW_V2) {
634 		/* Disable FLR triggered by BME(bus master enable) */
635 		io_write32(qm->io_base + QM_PEH_AXUSER_CFG, PEH_AXUSER_CFG);
636 		/* Set sec sqc and cqc cache wb threshold 4 */
637 		io_write32(qm->io_base + QM_CACHE_CTL, QM_CACHE_CFG);
638 	}
639 	/* Disable QM ras */
640 	io_write32(qm->io_base + HISI_QM_ABNML_INT_MASK,
641 		   HISI_QM_ABNML_INT_MASK_CFG);
642 }
643 
644 static enum hisi_drv_status qm_sqc_cfg(struct hisi_qp *qp)
645 {
646 	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;
647 	struct hisi_qm *qm = qp->qm;
648 	struct qm_sqc *sqc = qm->cfg_xqc.sqc;
649 	struct qm_mailbox mb = { };
650 
651 	mutex_lock(&qm->mailbox_lock);
652 	memset(sqc, 0, sizeof(struct qm_sqc));
653 	reg_pair_from_64(qp->sqe_dma, &sqc->base_h, &sqc->base_l);
654 	sqc->dw3 = (HISI_QM_Q_DEPTH - 1) |
655 		    SHIFT_U32(qm->sqe_log2_size, QM_SQ_SQE_SIZE_SHIFT);
656 	sqc->rand_data = QM_DB_RAND_DATA;
657 	sqc->cq_num = qp->qp_id;
658 	sqc->w13 = BIT32(QM_SQ_ORDER_SHIFT) |
659 		   SHIFT_U32(qp->sq_type, QM_SQ_TYPE_SHIFT);
660 
661 	qm_mb_init(&mb, QM_MB_CMD_SQC, qm->cfg_xqc.sqc_dma, qp->qp_id,
662 		   QM_MB_OP_WR);
663 	ret = qm_mb_nolock(qm, &mb);
664 	mutex_unlock(&qm->mailbox_lock);
665 
666 	return ret;
667 }
668 
669 static enum hisi_drv_status qm_cqc_cfg(struct hisi_qp *qp)
670 {
671 	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;
672 	struct hisi_qm *qm = qp->qm;
673 	struct qm_cqc *cqc = qm->cfg_xqc.cqc;
674 	struct qm_mailbox mb = { };
675 
676 	mutex_lock(&qm->mailbox_lock);
677 	memset(cqc, 0, sizeof(struct qm_cqc));
678 	reg_pair_from_64(qp->cqe_dma, &cqc->base_h, &cqc->base_l);
679 	cqc->dw3 = (HISI_QM_Q_DEPTH - 1) |
680 		    SHIFT_U32(QM_CQE_SIZE, QM_CQ_CQE_SIZE_SHIFT);
681 	cqc->rand_data = QM_DB_RAND_DATA;
682 	cqc->dw6 = PHASE_DEFAULT_VAL;
683 
684 	qm_mb_init(&mb, QM_MB_CMD_CQC, qm->cfg_xqc.cqc_dma, qp->qp_id,
685 		   QM_MB_OP_WR);
686 	ret = qm_mb_nolock(qm, &mb);
687 	mutex_unlock(&qm->mailbox_lock);
688 
689 	return ret;
690 }
691 
692 struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, uint8_t sq_type)
693 {
694 	struct hisi_qp *qp = NULL;
695 	int cur_idx = 0;
696 	uint32_t i = 0;
697 
698 	mutex_lock(&qm->qp_lock);
699 	if (qm->qp_in_used == qm->qp_num) {
700 		EMSG("All %"PRIu32" queues of QM are busy", qm->qp_num);
701 		goto err_proc;
702 	}
703 
704 	for (i = 0; i < qm->qp_num; i++) {
705 		cur_idx = (qm->qp_idx + i) % qm->qp_num;
706 		if (!qm->qp_array[cur_idx].used) {
707 			qm->qp_array[cur_idx].used = true;
708 			qm->qp_idx = cur_idx + 1;
709 			break;
710 		}
711 	}
712 
713 	qp = qm->qp_array + cur_idx;
714 	memset(qp->cqe, 0, sizeof(struct qm_cqe) * HISI_QM_Q_DEPTH);
715 	qp->sq_type = sq_type;
716 	qp->sq_tail = 0;
717 	qp->cq_head = 0;
718 	qp->cqc_phase = true;
719 
720 	if (qm_sqc_cfg(qp)) {
721 		EMSG("Fail to set qp[%"PRIu32"] sqc", qp->qp_id);
722 		goto err_proc;
723 	}
724 
725 	if (qm_cqc_cfg(qp)) {
726 		EMSG("Fail to set qp[%"PRIu32"] cqc", qp->qp_id);
727 		goto err_proc;
728 	}
729 
730 	qm->qp_in_used++;
731 	mutex_unlock(&qm->qp_lock);
732 	return qp;
733 
734 err_proc:
735 	qp->sq_type = 0;
736 	qp->cqc_phase = false;
737 	mutex_unlock(&qm->qp_lock);
738 	return NULL;
739 }
740 
741 void hisi_qm_release_qp(struct hisi_qp *qp)
742 {
743 	struct hisi_qm *qm = NULL;
744 
745 	if (!qp) {
746 		EMSG("QP is NULL");
747 		return;
748 	}
749 
750 	qm = qp->qm;
751 	mutex_lock(&qm->qp_lock);
752 	qm->qp_in_used--;
753 	qp->used = false;
754 	mutex_unlock(&qm->qp_lock);
755 }
756 
757 static void qm_sq_tail_update(struct hisi_qp *qp)
758 {
759 	if (qp->sq_tail == HISI_QM_Q_DEPTH - 1)
760 		qp->sq_tail = 0;
761 	else
762 		qp->sq_tail++;
763 }
764 
765 /*
766  * One task thread will just bind to one hardware queue, and
767  * hardware does not support msi. So we have no lock here.
768  */
769 enum hisi_drv_status hisi_qp_send(struct hisi_qp *qp, void *msg)
770 {
771 	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;
772 	struct hisi_qm *qm = NULL;
773 	void *sqe = NULL;
774 
775 	if (!qp) {
776 		EMSG("QP is NULL");
777 		return HISI_QM_DRVCRYPT_EINVAL;
778 	}
779 
780 	qm = qp->qm;
781 	ret = qm->dev_status_check(qm);
782 	if (ret)
783 		return ret;
784 
785 	sqe = (void *)((vaddr_t)qp->sqe + qm->sqe_size * qp->sq_tail);
786 	memset(sqe, 0, qm->sqe_size);
787 
788 	ret = qp->fill_sqe(sqe, msg);
789 	if (ret) {
790 		EMSG("Fail to fill sqe");
791 		return ret;
792 	}
793 
794 	qm_sq_tail_update(qp);
795 
796 	dsb();
797 	qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_SQ, qp->sq_tail, 0);
798 
799 	return HISI_QM_DRVCRYPT_NO_ERR;
800 }
801 
802 static void qm_cq_head_update(struct hisi_qp *qp)
803 {
804 	if (qp->cq_head == HISI_QM_Q_DEPTH - 1) {
805 		qp->cqc_phase = !qp->cqc_phase;
806 		qp->cq_head = 0;
807 	} else {
808 		qp->cq_head++;
809 	}
810 }
811 
812 static enum hisi_drv_status hisi_qp_recv(struct hisi_qp *qp, void *msg)
813 {
814 	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;
815 	struct hisi_qm *qm = qp->qm;
816 	struct qm_cqe *cqe = NULL;
817 	void *sqe = NULL;
818 
819 	ret = qm->dev_status_check(qm);
820 	if (ret)
821 		return ret;
822 
823 	cqe = qp->cqe + qp->cq_head;
824 	if (QM_CQE_PHASE(cqe) == qp->cqc_phase) {
825 		dsb_osh();
826 		sqe = (void *)((vaddr_t)qp->sqe + qm->sqe_size * cqe->sq_head);
827 		ret = qp->parse_sqe(sqe, msg);
828 		qm_cq_head_update(qp);
829 		qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ, qp->cq_head, 0);
830 		if (ret) {
831 			EMSG("Fail to parse sqe");
832 			return ret;
833 		}
834 	} else {
835 		return HISI_QM_DRVCRYPT_NO_ERR;
836 	}
837 
838 	return HISI_QM_DRVCRYPT_RECV_DONE;
839 }
840 
841 static void qm_dfx_dump(struct hisi_qm *qm)
842 {
843 	const struct qm_dfx_registers *regs = qm_dfx_regs;
844 	__maybe_unused uint32_t val = 0;
845 
846 	if (qm->fun_type == HISI_QM_HW_VF)
847 		return;
848 
849 	while (regs->reg_name) {
850 		val = io_read32(qm->io_base + regs->reg_offset);
851 		EMSG("%s= 0x%" PRIx32, regs->reg_name, val);
852 		regs++;
853 	}
854 }
855 
856 enum hisi_drv_status hisi_qp_recv_sync(struct hisi_qp *qp, void *msg)
857 {
858 	enum hisi_drv_status ret = HISI_QM_DRVCRYPT_NO_ERR;
859 	uint32_t timeout = 0;
860 
861 	if (!qp) {
862 		EMSG("QP is NULL");
863 		return HISI_QM_DRVCRYPT_EINVAL;
864 	}
865 
866 	timeout = timeout_init_us(QM_SINGLE_WAIT_TIME *
867 				  HISI_QM_RECV_SYNC_TIMEOUT);
868 	while (!timeout_elapsed(timeout)) {
869 		ret = hisi_qp_recv(qp, msg);
870 		if (ret) {
871 			if (ret != HISI_QM_DRVCRYPT_RECV_DONE) {
872 				EMSG("QM recv task error");
873 				qm_dfx_dump(qp->qm);
874 				return ret;
875 			} else {
876 				return HISI_QM_DRVCRYPT_NO_ERR;
877 			}
878 		}
879 	}
880 
881 	EMSG("QM recv task timeout");
882 	qm_dfx_dump(qp->qm);
883 	return HISI_QM_DRVCRYPT_ETMOUT;
884 }
885