xref: /OK3568_Linux_fs/kernel/drivers/crypto/hisilicon/qm.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
/* Copyright (c) 2019 HiSilicon Limited. */
#include <asm/page.h>
#include <linux/acpi.h>
#include <linux/aer.h>
#include <linux/bitmap.h>
#include <linux/debugfs.h>
#include <linux/dma-mapping.h>
#include <linux/idr.h>
#include <linux/io.h>
#include <linux/irqreturn.h>
#include <linux/log2.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/uacce.h>
#include <linux/uaccess.h>
#include <uapi/misc/uacce/hisi_qm.h>
#include "qm.h"

/* eq/aeq irq enable */
#define QM_VF_AEQ_INT_SOURCE		0x0
#define QM_VF_AEQ_INT_MASK		0x4
#define QM_VF_EQ_INT_SOURCE		0x8
#define QM_VF_EQ_INT_MASK		0xc
#define QM_IRQ_NUM_V1			1
#define QM_IRQ_NUM_PF_V2		4
#define QM_IRQ_NUM_VF_V2		2

#define QM_EQ_EVENT_IRQ_VECTOR		0
#define QM_AEQ_EVENT_IRQ_VECTOR		1
#define QM_ABNORMAL_EVENT_IRQ_VECTOR	3

/* mailbox */
#define QM_MB_CMD_SQC			0x0
#define QM_MB_CMD_CQC			0x1
#define QM_MB_CMD_EQC			0x2
#define QM_MB_CMD_AEQC			0x3
#define QM_MB_CMD_SQC_BT		0x4
#define QM_MB_CMD_CQC_BT		0x5
#define QM_MB_CMD_SQC_VFT_V2		0x6

#define QM_MB_CMD_SEND_BASE		0x300
#define QM_MB_EVENT_SHIFT		8
#define QM_MB_BUSY_SHIFT		13
#define QM_MB_OP_SHIFT			14
#define QM_MB_CMD_DATA_ADDR_L		0x304
#define QM_MB_CMD_DATA_ADDR_H		0x308

/* sqc shift */
#define QM_SQ_HOP_NUM_SHIFT		0
#define QM_SQ_PAGE_SIZE_SHIFT		4
#define QM_SQ_BUF_SIZE_SHIFT		8
#define QM_SQ_SQE_SIZE_SHIFT		12
#define QM_SQ_PRIORITY_SHIFT		0
#define QM_SQ_ORDERS_SHIFT		4
#define QM_SQ_TYPE_SHIFT		8

#define QM_SQ_TYPE_MASK			GENMASK(3, 0)
#define QM_SQ_TAIL_IDX(sqc)		((le16_to_cpu((sqc)->w11) >> 6) & 0x1)

/* cqc shift */
#define QM_CQ_HOP_NUM_SHIFT		0
#define QM_CQ_PAGE_SIZE_SHIFT		4
#define QM_CQ_BUF_SIZE_SHIFT		8
#define QM_CQ_CQE_SIZE_SHIFT		12
#define QM_CQ_PHASE_SHIFT		0
#define QM_CQ_FLAG_SHIFT		1

#define QM_CQE_PHASE(cqe)		(le16_to_cpu((cqe)->w7) & 0x1)
#define QM_QC_CQE_SIZE			4
#define QM_CQ_TAIL_IDX(cqc)		((le16_to_cpu((cqc)->w11) >> 6) & 0x1)

/* eqc shift */
#define QM_EQE_AEQE_SIZE		(2UL << 12)
#define QM_EQC_PHASE_SHIFT		16

#define QM_EQE_PHASE(eqe)		((le32_to_cpu((eqe)->dw0) >> 16) & 0x1)
#define QM_EQE_CQN_MASK			GENMASK(15, 0)

#define QM_AEQE_PHASE(aeqe)		((le32_to_cpu((aeqe)->dw0) >> 16) & 0x1)
#define QM_AEQE_TYPE_SHIFT		17

#define QM_DOORBELL_CMD_SQ		0
#define QM_DOORBELL_CMD_CQ		1
#define QM_DOORBELL_CMD_EQ		2
#define QM_DOORBELL_CMD_AEQ		3

#define QM_DOORBELL_BASE_V1		0x340
#define QM_DB_CMD_SHIFT_V1		16
#define QM_DB_INDEX_SHIFT_V1		32
#define QM_DB_PRIORITY_SHIFT_V1		48
#define QM_DOORBELL_SQ_CQ_BASE_V2	0x1000
#define QM_DOORBELL_EQ_AEQ_BASE_V2	0x2000
#define QM_DB_CMD_SHIFT_V2		12
#define QM_DB_RAND_SHIFT_V2		16
#define QM_DB_INDEX_SHIFT_V2		32
#define QM_DB_PRIORITY_SHIFT_V2		48

#define QM_MEM_START_INIT		0x100040
#define QM_MEM_INIT_DONE		0x100044
#define QM_VFT_CFG_RDY			0x10006c
#define QM_VFT_CFG_OP_WR		0x100058
#define QM_VFT_CFG_TYPE			0x10005c
#define QM_SQC_VFT			0x0
#define QM_CQC_VFT			0x1
#define QM_VFT_CFG			0x100060
#define QM_VFT_CFG_OP_ENABLE		0x100054

#define QM_VFT_CFG_DATA_L		0x100064
#define QM_VFT_CFG_DATA_H		0x100068
#define QM_SQC_VFT_BUF_SIZE		(7ULL << 8)
#define QM_SQC_VFT_SQC_SIZE		(5ULL << 12)
#define QM_SQC_VFT_INDEX_NUMBER		(1ULL << 16)
#define QM_SQC_VFT_START_SQN_SHIFT	28
#define QM_SQC_VFT_VALID		(1ULL << 44)
#define QM_SQC_VFT_SQN_SHIFT		45
#define QM_CQC_VFT_BUF_SIZE		(7ULL << 8)
#define QM_CQC_VFT_SQC_SIZE		(5ULL << 12)
#define QM_CQC_VFT_INDEX_NUMBER		(1ULL << 16)
#define QM_CQC_VFT_VALID		(1ULL << 28)

#define QM_SQC_VFT_BASE_SHIFT_V2	28
#define QM_SQC_VFT_BASE_MASK_V2		GENMASK(5, 0)
#define QM_SQC_VFT_NUM_SHIFT_V2		45
#define QM_SQC_VFT_NUM_MASK_v2		GENMASK(9, 0)

#define QM_DFX_CNT_CLR_CE		0x100118

#define QM_ABNORMAL_INT_SOURCE		0x100000
#define QM_ABNORMAL_INT_SOURCE_CLR	GENMASK(12, 0)
#define QM_ABNORMAL_INT_MASK		0x100004
#define QM_ABNORMAL_INT_MASK_VALUE	0x1fff
#define QM_ABNORMAL_INT_STATUS		0x100008
#define QM_ABNORMAL_INT_SET		0x10000c
#define QM_ABNORMAL_INF00		0x100010
#define QM_FIFO_OVERFLOW_TYPE		0xc0
#define QM_FIFO_OVERFLOW_TYPE_SHIFT	6
#define QM_FIFO_OVERFLOW_VF		0x3f
#define QM_ABNORMAL_INF01		0x100014
#define QM_DB_TIMEOUT_TYPE		0xc0
#define QM_DB_TIMEOUT_TYPE_SHIFT	6
#define QM_DB_TIMEOUT_VF		0x3f
#define QM_RAS_CE_ENABLE		0x1000ec
#define QM_RAS_FE_ENABLE		0x1000f0
#define QM_RAS_NFE_ENABLE		0x1000f4
#define QM_RAS_CE_THRESHOLD		0x1000f8
#define QM_RAS_CE_TIMES_PER_IRQ		1
#define QM_RAS_MSI_INT_SEL		0x1040f4

#define QM_DEV_RESET_FLAG		0
#define QM_RESET_WAIT_TIMEOUT		400
#define QM_PEH_VENDOR_ID		0x1000d8
#define ACC_VENDOR_ID_VALUE		0x5a5a
#define QM_PEH_DFX_INFO0		0x1000fc
#define ACC_PEH_SRIOV_CTRL_VF_MSE_SHIFT	3
#define ACC_PEH_MSI_DISABLE		GENMASK(31, 0)
#define ACC_MASTER_GLOBAL_CTRL_SHUTDOWN	0x1
#define ACC_MASTER_TRANS_RETURN_RW	3
#define ACC_MASTER_TRANS_RETURN		0x300150
#define ACC_MASTER_GLOBAL_CTRL		0x300000
#define ACC_AM_CFG_PORT_WR_EN		0x30001c
#define QM_RAS_NFE_MBIT_DISABLE		~QM_ECC_MBIT
#define ACC_AM_ROB_ECC_INT_STS		0x300104
#define ACC_ROB_ECC_ERR_MULTPL		BIT(1)

#define POLL_PERIOD			10
#define POLL_TIMEOUT			1000
#define WAIT_PERIOD_US_MAX		200
#define WAIT_PERIOD_US_MIN		100
#define MAX_WAIT_COUNTS			1000
#define QM_CACHE_WB_START		0x204
#define QM_CACHE_WB_DONE		0x208

#define PCI_BAR_2			2
#define QM_SQE_DATA_ALIGN_MASK		GENMASK(6, 0)
#define QMC_ALIGN(sz)			ALIGN(sz, 32)

#define QM_DBG_READ_LEN		256
#define QM_DBG_WRITE_LEN		1024
#define QM_DBG_TMP_BUF_LEN		22
#define QM_PCI_COMMAND_INVALID		~0

#define WAIT_PERIOD			20
#define REMOVE_WAIT_DELAY		10
#define QM_SQE_ADDR_MASK		GENMASK(7, 0)
#define QM_EQ_DEPTH			(1024 * 2)

#define QM_MK_CQC_DW3_V1(hop_num, pg_sz, buf_sz, cqe_sz) \
	(((hop_num) << QM_CQ_HOP_NUM_SHIFT)	| \
	((pg_sz) << QM_CQ_PAGE_SIZE_SHIFT)	| \
	((buf_sz) << QM_CQ_BUF_SIZE_SHIFT)	| \
	((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))

#define QM_MK_CQC_DW3_V2(cqe_sz) \
	((QM_Q_DEPTH - 1) | ((cqe_sz) << QM_CQ_CQE_SIZE_SHIFT))

#define QM_MK_SQC_W13(priority, orders, alg_type) \
	(((priority) << QM_SQ_PRIORITY_SHIFT)	| \
	((orders) << QM_SQ_ORDERS_SHIFT)	| \
	(((alg_type) & QM_SQ_TYPE_MASK) << QM_SQ_TYPE_SHIFT))

#define QM_MK_SQC_DW3_V1(hop_num, pg_sz, buf_sz, sqe_sz) \
	(((hop_num) << QM_SQ_HOP_NUM_SHIFT)	| \
	((pg_sz) << QM_SQ_PAGE_SIZE_SHIFT)	| \
	((buf_sz) << QM_SQ_BUF_SIZE_SHIFT)	| \
	((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))

#define QM_MK_SQC_DW3_V2(sqe_sz) \
	((QM_Q_DEPTH - 1) | ((u32)ilog2(sqe_sz) << QM_SQ_SQE_SIZE_SHIFT))
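
/*
 * Illustrative example (not from the driver): for a 128-byte SQE on a V2
 * QM, QM_MK_SQC_DW3_V2(128) expands to
 *
 *	(QM_Q_DEPTH - 1) | (ilog2(128) << QM_SQ_SQE_SIZE_SHIFT)
 *	= (QM_Q_DEPTH - 1) | (7 << 12)
 *
 * i.e. the queue-depth mask in the low bits and the log2 of the SQE size
 * in bits 12-15.
 */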

#define INIT_QC_COMMON(qc, base, pasid) do {			\
	(qc)->head = 0;						\
	(qc)->tail = 0;						\
	(qc)->base_l = cpu_to_le32(lower_32_bits(base));	\
	(qc)->base_h = cpu_to_le32(upper_32_bits(base));	\
	(qc)->dw3 = 0;						\
	(qc)->w8 = 0;						\
	(qc)->rsvd0 = 0;					\
	(qc)->pasid = cpu_to_le16(pasid);			\
	(qc)->w11 = 0;						\
	(qc)->rsvd1 = 0;					\
} while (0)
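
/*
 * Usage sketch (assumed, for illustration only): a context is typically
 * zero-initialized with INIT_QC_COMMON() and the version-specific fields
 * are then filled in, e.g. for an SQC on a V2 QM:
 *
 *	INIT_QC_COMMON(sqc, qp->sqe_dma, pasid);
 *	sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size));
 *	sqc->w13 = cpu_to_le16(QM_MK_SQC_W13(0, 1, qp->alg_type));
 */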

enum vft_type {
	SQC_VFT = 0,
	CQC_VFT,
};

enum acc_err_result {
	ACC_ERR_NONE,
	ACC_ERR_NEED_RESET,
	ACC_ERR_RECOVERED,
};

struct qm_cqe {
	__le32 rsvd0;
	__le16 cmd_id;
	__le16 rsvd1;
	__le16 sq_head;
	__le16 sq_num;
	__le16 rsvd2;
	__le16 w7;
};

struct qm_eqe {
	__le32 dw0;
};

struct qm_aeqe {
	__le32 dw0;
};

struct qm_sqc {
	__le16 head;
	__le16 tail;
	__le32 base_l;
	__le32 base_h;
	__le32 dw3;
	__le16 w8;
	__le16 rsvd0;
	__le16 pasid;
	__le16 w11;
	__le16 cq_num;
	__le16 w13;
	__le32 rsvd1;
};

struct qm_cqc {
	__le16 head;
	__le16 tail;
	__le32 base_l;
	__le32 base_h;
	__le32 dw3;
	__le16 w8;
	__le16 rsvd0;
	__le16 pasid;
	__le16 w11;
	__le32 dw6;
	__le32 rsvd1;
};

struct qm_eqc {
	__le16 head;
	__le16 tail;
	__le32 base_l;
	__le32 base_h;
	__le32 dw3;
	__le32 rsvd[2];
	__le32 dw6;
};

struct qm_aeqc {
	__le16 head;
	__le16 tail;
	__le32 base_l;
	__le32 base_h;
	__le32 dw3;
	__le32 rsvd[2];
	__le32 dw6;
};

struct qm_mailbox {
	__le16 w0;
	__le16 queue_num;
	__le32 base_l;
	__le32 base_h;
	__le32 rsvd;
};

struct qm_doorbell {
	__le16 queue_num;
	__le16 cmd;
	__le16 index;
	__le16 priority;
};

struct hisi_qm_resource {
	struct hisi_qm *qm;
	int distance;
	struct list_head list;
};

struct hisi_qm_hw_ops {
	int (*get_vft)(struct hisi_qm *qm, u32 *base, u32 *number);
	void (*qm_db)(struct hisi_qm *qm, u16 qn,
		      u8 cmd, u16 index, u8 priority);
	u32 (*get_irq_num)(struct hisi_qm *qm);
	int (*debug_init)(struct hisi_qm *qm);
	void (*hw_error_init)(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe);
	void (*hw_error_uninit)(struct hisi_qm *qm);
	enum acc_err_result (*hw_error_handle)(struct hisi_qm *qm);
};

struct qm_dfx_item {
	const char *name;
	u32 offset;
};

static struct qm_dfx_item qm_dfx_files[] = {
	{"err_irq", offsetof(struct qm_dfx, err_irq_cnt)},
	{"aeq_irq", offsetof(struct qm_dfx, aeq_irq_cnt)},
	{"abnormal_irq", offsetof(struct qm_dfx, abnormal_irq_cnt)},
	{"create_qp_err", offsetof(struct qm_dfx, create_qp_err_cnt)},
	{"mb_err", offsetof(struct qm_dfx, mb_err_cnt)},
};

static const char * const qm_debug_file_name[] = {
	[CURRENT_Q]    = "current_q",
	[CLEAR_ENABLE] = "clear_enable",
};

struct hisi_qm_hw_error {
	u32 int_msk;
	const char *msg;
};

static const struct hisi_qm_hw_error qm_hw_error[] = {
	{ .int_msk = BIT(0), .msg = "qm_axi_rresp" },
	{ .int_msk = BIT(1), .msg = "qm_axi_bresp" },
	{ .int_msk = BIT(2), .msg = "qm_ecc_mbit" },
	{ .int_msk = BIT(3), .msg = "qm_ecc_1bit" },
	{ .int_msk = BIT(4), .msg = "qm_acc_get_task_timeout" },
	{ .int_msk = BIT(5), .msg = "qm_acc_do_task_timeout" },
	{ .int_msk = BIT(6), .msg = "qm_acc_wb_not_ready_timeout" },
	{ .int_msk = BIT(7), .msg = "qm_sq_cq_vf_invalid" },
	{ .int_msk = BIT(8), .msg = "qm_cq_vf_invalid" },
	{ .int_msk = BIT(9), .msg = "qm_sq_vf_invalid" },
	{ .int_msk = BIT(10), .msg = "qm_db_timeout" },
	{ .int_msk = BIT(11), .msg = "qm_of_fifo_of" },
	{ .int_msk = BIT(12), .msg = "qm_db_random_invalid" },
	{ /* sentinel */ }
};

static const char * const qm_db_timeout[] = {
	"sq", "cq", "eq", "aeq",
};

static const char * const qm_fifo_overflow[] = {
	"cq", "eq", "aeq",
};

static const char * const qm_s[] = {
	"init", "start", "close", "stop",
};

static const char * const qp_s[] = {
	"none", "init", "start", "stop", "close",
};

static bool qm_avail_state(struct hisi_qm *qm, enum qm_state new)
{
	enum qm_state curr = atomic_read(&qm->status.flags);
	bool avail = false;

	switch (curr) {
	case QM_INIT:
		if (new == QM_START || new == QM_CLOSE)
			avail = true;
		break;
	case QM_START:
		if (new == QM_STOP)
			avail = true;
		break;
	case QM_STOP:
		if (new == QM_CLOSE || new == QM_START)
			avail = true;
		break;
	default:
		break;
	}

	dev_dbg(&qm->pdev->dev, "change qm state from %s to %s\n",
		qm_s[curr], qm_s[new]);

	if (!avail)
		dev_warn(&qm->pdev->dev, "Can not change qm state from %s to %s\n",
			 qm_s[curr], qm_s[new]);

	return avail;
}
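
/*
 * Allowed QM state transitions, as encoded above:
 *
 *	QM_INIT  -> QM_START | QM_CLOSE
 *	QM_START -> QM_STOP
 *	QM_STOP  -> QM_START | QM_CLOSE
 *
 * Any other transition is rejected with a warning.
 */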

static bool qm_qp_avail_state(struct hisi_qm *qm, struct hisi_qp *qp,
			      enum qp_state new)
{
	enum qm_state qm_curr = atomic_read(&qm->status.flags);
	enum qp_state qp_curr = 0;
	bool avail = false;

	if (qp)
		qp_curr = atomic_read(&qp->qp_status.flags);

	switch (new) {
	case QP_INIT:
		if (qm_curr == QM_START || qm_curr == QM_INIT)
			avail = true;
		break;
	case QP_START:
		if ((qm_curr == QM_START && qp_curr == QP_INIT) ||
		    (qm_curr == QM_START && qp_curr == QP_STOP))
			avail = true;
		break;
	case QP_STOP:
		if ((qm_curr == QM_START && qp_curr == QP_START) ||
		    (qp_curr == QP_INIT))
			avail = true;
		break;
	case QP_CLOSE:
		if ((qm_curr == QM_START && qp_curr == QP_INIT) ||
		    (qm_curr == QM_START && qp_curr == QP_STOP) ||
		    (qm_curr == QM_STOP && qp_curr == QP_STOP)  ||
		    (qm_curr == QM_STOP && qp_curr == QP_INIT))
			avail = true;
		break;
	default:
		break;
	}

	dev_dbg(&qm->pdev->dev, "change qp state from %s to %s in QM %s\n",
		qp_s[qp_curr], qp_s[new], qm_s[qm_curr]);

	if (!avail)
		dev_warn(&qm->pdev->dev,
			 "Can not change qp state from %s to %s in QM %s\n",
			 qp_s[qp_curr], qp_s[new], qm_s[qm_curr]);

	return avail;
}

/* return 0 if the mailbox is ready, -ETIMEDOUT on hardware timeout */
static int qm_wait_mb_ready(struct hisi_qm *qm)
{
	u32 val;

	return readl_relaxed_poll_timeout(qm->io_base + QM_MB_CMD_SEND_BASE,
					  val, !((val >> QM_MB_BUSY_SHIFT) &
					  0x1), 10, 1000);
}

/* 128 bits should be written to the hardware at one time to trigger a mailbox */
static void qm_mb_write(struct hisi_qm *qm, const void *src)
{
	void __iomem *fun_base = qm->io_base + QM_MB_CMD_SEND_BASE;
	unsigned long tmp0 = 0, tmp1 = 0;

	if (!IS_ENABLED(CONFIG_ARM64)) {
		memcpy_toio(fun_base, src, 16);
		wmb();
		return;
	}

	asm volatile("ldp %0, %1, %3\n"
		     "stp %0, %1, %2\n"
		     "dsb sy\n"
		     : "=&r" (tmp0),
		       "=&r" (tmp1),
		       "+Q" (*((char __iomem *)fun_base))
		     : "Q" (*((char *)src))
		     : "memory");
}
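
/*
 * On arm64 the ldp/stp pair above issues a single 128-bit store, so the
 * busy bit in w0 becomes visible to the device together with the rest of
 * the command; the dsb orders the store before the subsequent readback of
 * the busy bit.  On other architectures, memcpy_toio() plus wmb() is a
 * best-effort fallback that may reach the device as several smaller
 * writes.
 */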

static int qm_mb(struct hisi_qm *qm, u8 cmd, dma_addr_t dma_addr, u16 queue,
		 bool op)
{
	struct qm_mailbox mailbox;
	int ret = 0;

	dev_dbg(&qm->pdev->dev, "QM mailbox request to q%u: %u-%llx\n",
		queue, cmd, (unsigned long long)dma_addr);

	mailbox.w0 = cpu_to_le16(cmd |
		     (op ? 0x1 << QM_MB_OP_SHIFT : 0) |
		     (0x1 << QM_MB_BUSY_SHIFT));
	mailbox.queue_num = cpu_to_le16(queue);
	mailbox.base_l = cpu_to_le32(lower_32_bits(dma_addr));
	mailbox.base_h = cpu_to_le32(upper_32_bits(dma_addr));
	mailbox.rsvd = 0;

	mutex_lock(&qm->mailbox_lock);

	if (unlikely(qm_wait_mb_ready(qm))) {
		ret = -EBUSY;
		dev_err(&qm->pdev->dev, "QM mailbox is busy to start!\n");
		goto busy_unlock;
	}

	qm_mb_write(qm, &mailbox);

	if (unlikely(qm_wait_mb_ready(qm))) {
		ret = -EBUSY;
		dev_err(&qm->pdev->dev, "QM mailbox operation timeout!\n");
		goto busy_unlock;
	}

busy_unlock:
	mutex_unlock(&qm->mailbox_lock);

	if (ret)
		atomic64_inc(&qm->debug.dfx.mb_err_cnt);
	return ret;
}
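
/*
 * Usage sketch (illustrative): programming the SQC of queue qp_id from a
 * DMA-able context buffer.  op = 0 writes the context to hardware and
 * op = 1 reads it back (see qm_dump_sqc_raw() below):
 *
 *	ret = qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 0);
 */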

static void qm_db_v1(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
{
	u64 doorbell;

	doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V1) |
		   ((u64)index << QM_DB_INDEX_SHIFT_V1)  |
		   ((u64)priority << QM_DB_PRIORITY_SHIFT_V1);

	writeq(doorbell, qm->io_base + QM_DOORBELL_BASE_V1);
}

static void qm_db_v2(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
{
	u64 doorbell;
	u64 dbase;
	u16 randata = 0;

	if (cmd == QM_DOORBELL_CMD_SQ || cmd == QM_DOORBELL_CMD_CQ)
		dbase = QM_DOORBELL_SQ_CQ_BASE_V2;
	else
		dbase = QM_DOORBELL_EQ_AEQ_BASE_V2;

	doorbell = qn | ((u64)cmd << QM_DB_CMD_SHIFT_V2) |
		   ((u64)randata << QM_DB_RAND_SHIFT_V2) |
		   ((u64)index << QM_DB_INDEX_SHIFT_V2)	 |
		   ((u64)priority << QM_DB_PRIORITY_SHIFT_V2);

	writeq(doorbell, qm->io_base + dbase);
}
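
/*
 * Illustrative V2 doorbell composition: ringing SQ tail index 7 of queue
 * 3 at priority 0 builds
 *
 *	doorbell = 3 | ((u64)QM_DOORBELL_CMD_SQ << 12) | ((u64)7 << 32)
 *
 * and writes it to io_base + QM_DOORBELL_SQ_CQ_BASE_V2.  randata is kept
 * zero here; the field presumably feeds the hardware's doorbell
 * randomization check (see the qm_db_random_invalid error bit).
 */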
571*4882a593Smuzhiyun 
qm_db(struct hisi_qm * qm,u16 qn,u8 cmd,u16 index,u8 priority)572*4882a593Smuzhiyun static void qm_db(struct hisi_qm *qm, u16 qn, u8 cmd, u16 index, u8 priority)
573*4882a593Smuzhiyun {
574*4882a593Smuzhiyun 	dev_dbg(&qm->pdev->dev, "QM doorbell request: qn=%u, cmd=%u, index=%u\n",
575*4882a593Smuzhiyun 		qn, cmd, index);
576*4882a593Smuzhiyun 
577*4882a593Smuzhiyun 	qm->ops->qm_db(qm, qn, cmd, index, priority);
578*4882a593Smuzhiyun }
579*4882a593Smuzhiyun 
qm_dev_mem_reset(struct hisi_qm * qm)580*4882a593Smuzhiyun static int qm_dev_mem_reset(struct hisi_qm *qm)
581*4882a593Smuzhiyun {
582*4882a593Smuzhiyun 	u32 val;
583*4882a593Smuzhiyun 
584*4882a593Smuzhiyun 	writel(0x1, qm->io_base + QM_MEM_START_INIT);
585*4882a593Smuzhiyun 	return readl_relaxed_poll_timeout(qm->io_base + QM_MEM_INIT_DONE, val,
586*4882a593Smuzhiyun 					  val & BIT(0), 10, 1000);
587*4882a593Smuzhiyun }
588*4882a593Smuzhiyun 
qm_get_irq_num_v1(struct hisi_qm * qm)589*4882a593Smuzhiyun static u32 qm_get_irq_num_v1(struct hisi_qm *qm)
590*4882a593Smuzhiyun {
591*4882a593Smuzhiyun 	return QM_IRQ_NUM_V1;
592*4882a593Smuzhiyun }
593*4882a593Smuzhiyun 
qm_get_irq_num_v2(struct hisi_qm * qm)594*4882a593Smuzhiyun static u32 qm_get_irq_num_v2(struct hisi_qm *qm)
595*4882a593Smuzhiyun {
596*4882a593Smuzhiyun 	if (qm->fun_type == QM_HW_PF)
597*4882a593Smuzhiyun 		return QM_IRQ_NUM_PF_V2;
598*4882a593Smuzhiyun 	else
599*4882a593Smuzhiyun 		return QM_IRQ_NUM_VF_V2;
600*4882a593Smuzhiyun }
601*4882a593Smuzhiyun 
qm_to_hisi_qp(struct hisi_qm * qm,struct qm_eqe * eqe)602*4882a593Smuzhiyun static struct hisi_qp *qm_to_hisi_qp(struct hisi_qm *qm, struct qm_eqe *eqe)
603*4882a593Smuzhiyun {
604*4882a593Smuzhiyun 	u16 cqn = le32_to_cpu(eqe->dw0) & QM_EQE_CQN_MASK;
605*4882a593Smuzhiyun 
606*4882a593Smuzhiyun 	return &qm->qp_array[cqn];
607*4882a593Smuzhiyun }
608*4882a593Smuzhiyun 
qm_cq_head_update(struct hisi_qp * qp)609*4882a593Smuzhiyun static void qm_cq_head_update(struct hisi_qp *qp)
610*4882a593Smuzhiyun {
611*4882a593Smuzhiyun 	if (qp->qp_status.cq_head == QM_Q_DEPTH - 1) {
612*4882a593Smuzhiyun 		qp->qp_status.cqc_phase = !qp->qp_status.cqc_phase;
613*4882a593Smuzhiyun 		qp->qp_status.cq_head = 0;
614*4882a593Smuzhiyun 	} else {
615*4882a593Smuzhiyun 		qp->qp_status.cq_head++;
616*4882a593Smuzhiyun 	}
617*4882a593Smuzhiyun }
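
/*
 * The CQ uses a phase bit rather than a consumer-visible tail pointer:
 * hardware flips the phase it writes into each CQE on every wrap of the
 * ring, so a CQE is valid only while QM_CQE_PHASE(cqe) equals
 * qp_status.cqc_phase.  Flipping cqc_phase together with the head wrap
 * above keeps software and hardware in step.
 */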
618*4882a593Smuzhiyun 
qm_poll_qp(struct hisi_qp * qp,struct hisi_qm * qm)619*4882a593Smuzhiyun static void qm_poll_qp(struct hisi_qp *qp, struct hisi_qm *qm)
620*4882a593Smuzhiyun {
621*4882a593Smuzhiyun 	if (qp->event_cb) {
622*4882a593Smuzhiyun 		qp->event_cb(qp);
623*4882a593Smuzhiyun 		return;
624*4882a593Smuzhiyun 	}
625*4882a593Smuzhiyun 
626*4882a593Smuzhiyun 	if (qp->req_cb) {
627*4882a593Smuzhiyun 		struct qm_cqe *cqe = qp->cqe + qp->qp_status.cq_head;
628*4882a593Smuzhiyun 
629*4882a593Smuzhiyun 		while (QM_CQE_PHASE(cqe) == qp->qp_status.cqc_phase) {
630*4882a593Smuzhiyun 			dma_rmb();
631*4882a593Smuzhiyun 			qp->req_cb(qp, qp->sqe + qm->sqe_size *
632*4882a593Smuzhiyun 				   le16_to_cpu(cqe->sq_head));
633*4882a593Smuzhiyun 			qm_cq_head_update(qp);
634*4882a593Smuzhiyun 			cqe = qp->cqe + qp->qp_status.cq_head;
635*4882a593Smuzhiyun 			qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ,
636*4882a593Smuzhiyun 			      qp->qp_status.cq_head, 0);
637*4882a593Smuzhiyun 			atomic_dec(&qp->qp_status.used);
638*4882a593Smuzhiyun 		}
639*4882a593Smuzhiyun 
640*4882a593Smuzhiyun 		/* set c_flag */
641*4882a593Smuzhiyun 		qm_db(qm, qp->qp_id, QM_DOORBELL_CMD_CQ,
642*4882a593Smuzhiyun 		      qp->qp_status.cq_head, 1);
643*4882a593Smuzhiyun 	}
644*4882a593Smuzhiyun }
645*4882a593Smuzhiyun 
qm_work_process(struct work_struct * work)646*4882a593Smuzhiyun static void qm_work_process(struct work_struct *work)
647*4882a593Smuzhiyun {
648*4882a593Smuzhiyun 	struct hisi_qm *qm = container_of(work, struct hisi_qm, work);
649*4882a593Smuzhiyun 	struct qm_eqe *eqe = qm->eqe + qm->status.eq_head;
650*4882a593Smuzhiyun 	struct hisi_qp *qp;
651*4882a593Smuzhiyun 	int eqe_num = 0;
652*4882a593Smuzhiyun 
653*4882a593Smuzhiyun 	while (QM_EQE_PHASE(eqe) == qm->status.eqc_phase) {
654*4882a593Smuzhiyun 		eqe_num++;
655*4882a593Smuzhiyun 		qp = qm_to_hisi_qp(qm, eqe);
656*4882a593Smuzhiyun 		qm_poll_qp(qp, qm);
657*4882a593Smuzhiyun 
658*4882a593Smuzhiyun 		if (qm->status.eq_head == QM_EQ_DEPTH - 1) {
659*4882a593Smuzhiyun 			qm->status.eqc_phase = !qm->status.eqc_phase;
660*4882a593Smuzhiyun 			eqe = qm->eqe;
661*4882a593Smuzhiyun 			qm->status.eq_head = 0;
662*4882a593Smuzhiyun 		} else {
663*4882a593Smuzhiyun 			eqe++;
664*4882a593Smuzhiyun 			qm->status.eq_head++;
665*4882a593Smuzhiyun 		}
666*4882a593Smuzhiyun 
667*4882a593Smuzhiyun 		if (eqe_num == QM_EQ_DEPTH / 2 - 1) {
668*4882a593Smuzhiyun 			eqe_num = 0;
669*4882a593Smuzhiyun 			qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
670*4882a593Smuzhiyun 		}
671*4882a593Smuzhiyun 	}
672*4882a593Smuzhiyun 
673*4882a593Smuzhiyun 	qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);
674*4882a593Smuzhiyun }
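
/*
 * While draining the event queue, the EQ doorbell is rung once every
 * QM_EQ_DEPTH / 2 - 1 entries and once more after the loop, so the
 * hardware sees forward progress without one doorbell write per event.
 */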

static irqreturn_t do_qm_irq(int irq, void *data)
{
	struct hisi_qm *qm = (struct hisi_qm *)data;

	/* use the workqueue created by the QM device driver, if any */
	if (qm->wq)
		queue_work(qm->wq, &qm->work);
	else
		schedule_work(&qm->work);

	return IRQ_HANDLED;
}

static irqreturn_t qm_irq(int irq, void *data)
{
	struct hisi_qm *qm = data;

	if (readl(qm->io_base + QM_VF_EQ_INT_SOURCE))
		return do_qm_irq(irq, data);

	atomic64_inc(&qm->debug.dfx.err_irq_cnt);
	dev_err(&qm->pdev->dev, "invalid int source\n");
	qm_db(qm, 0, QM_DOORBELL_CMD_EQ, qm->status.eq_head, 0);

	return IRQ_NONE;
}

static irqreturn_t qm_aeq_irq(int irq, void *data)
{
	struct hisi_qm *qm = data;
	struct qm_aeqe *aeqe = qm->aeqe + qm->status.aeq_head;
	u32 type;

	atomic64_inc(&qm->debug.dfx.aeq_irq_cnt);
	if (!readl(qm->io_base + QM_VF_AEQ_INT_SOURCE))
		return IRQ_NONE;

	while (QM_AEQE_PHASE(aeqe) == qm->status.aeqc_phase) {
		type = le32_to_cpu(aeqe->dw0) >> QM_AEQE_TYPE_SHIFT;
		if (type < ARRAY_SIZE(qm_fifo_overflow))
			dev_err(&qm->pdev->dev, "%s overflow\n",
				qm_fifo_overflow[type]);
		else
			dev_err(&qm->pdev->dev, "unknown error type %d\n",
				type);

		if (qm->status.aeq_head == QM_Q_DEPTH - 1) {
			qm->status.aeqc_phase = !qm->status.aeqc_phase;
			aeqe = qm->aeqe;
			qm->status.aeq_head = 0;
		} else {
			aeqe++;
			qm->status.aeq_head++;
		}

		qm_db(qm, 0, QM_DOORBELL_CMD_AEQ, qm->status.aeq_head, 0);
	}

	return IRQ_HANDLED;
}

static void qm_irq_unregister(struct hisi_qm *qm)
{
	struct pci_dev *pdev = qm->pdev;

	free_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), qm);

	if (qm->ver == QM_HW_V1)
		return;

	free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm);

	if (qm->fun_type == QM_HW_PF)
		free_irq(pci_irq_vector(pdev,
			 QM_ABNORMAL_EVENT_IRQ_VECTOR), qm);
}

static void qm_init_qp_status(struct hisi_qp *qp)
{
	struct hisi_qp_status *qp_status = &qp->qp_status;

	qp_status->sq_tail = 0;
	qp_status->cq_head = 0;
	qp_status->cqc_phase = true;
	atomic_set(&qp_status->used, 0);
}

static void qm_vft_data_cfg(struct hisi_qm *qm, enum vft_type type, u32 base,
			    u32 number)
{
	u64 tmp = 0;

	if (number > 0) {
		switch (type) {
		case SQC_VFT:
			if (qm->ver == QM_HW_V1) {
				tmp = QM_SQC_VFT_BUF_SIZE	|
				      QM_SQC_VFT_SQC_SIZE	|
				      QM_SQC_VFT_INDEX_NUMBER	|
				      QM_SQC_VFT_VALID		|
				      (u64)base << QM_SQC_VFT_START_SQN_SHIFT;
			} else {
				tmp = (u64)base << QM_SQC_VFT_START_SQN_SHIFT |
				      QM_SQC_VFT_VALID |
				      (u64)(number - 1) << QM_SQC_VFT_SQN_SHIFT;
			}
			break;
		case CQC_VFT:
			if (qm->ver == QM_HW_V1) {
				tmp = QM_CQC_VFT_BUF_SIZE	|
				      QM_CQC_VFT_SQC_SIZE	|
				      QM_CQC_VFT_INDEX_NUMBER	|
				      QM_CQC_VFT_VALID;
			} else {
				tmp = QM_CQC_VFT_VALID;
			}
			break;
		}
	}

	writel(lower_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_L);
	writel(upper_32_bits(tmp), qm->io_base + QM_VFT_CFG_DATA_H);
}

static int qm_set_vft_common(struct hisi_qm *qm, enum vft_type type,
			     u32 fun_num, u32 base, u32 number)
{
	unsigned int val;
	int ret;

	ret = readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
					 val & BIT(0), 10, 1000);
	if (ret)
		return ret;

	writel(0x0, qm->io_base + QM_VFT_CFG_OP_WR);
	writel(type, qm->io_base + QM_VFT_CFG_TYPE);
	writel(fun_num, qm->io_base + QM_VFT_CFG);

	qm_vft_data_cfg(qm, type, base, number);

	writel(0x0, qm->io_base + QM_VFT_CFG_RDY);
	writel(0x1, qm->io_base + QM_VFT_CFG_OP_ENABLE);

	return readl_relaxed_poll_timeout(qm->io_base + QM_VFT_CFG_RDY, val,
					  val & BIT(0), 10, 1000);
}

/* This configuration should be done after qm_dev_mem_reset() */
static int qm_set_sqc_cqc_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
			      u32 number)
{
	int ret, i;

	for (i = SQC_VFT; i <= CQC_VFT; i++) {
		ret = qm_set_vft_common(qm, i, fun_num, base, number);
		if (ret)
			return ret;
	}

	return 0;
}
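
/*
 * Usage sketch (illustrative): a PF assigning the whole queue range to
 * function 0 would configure both the SQC and CQC tables with
 *
 *	ret = qm_set_sqc_cqc_vft(qm, 0, 0, qm->qp_num);
 */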

static int qm_get_vft_v2(struct hisi_qm *qm, u32 *base, u32 *number)
{
	u64 sqc_vft;
	int ret;

	ret = qm_mb(qm, QM_MB_CMD_SQC_VFT_V2, 0, 0, 1);
	if (ret)
		return ret;

	sqc_vft = readl(qm->io_base + QM_MB_CMD_DATA_ADDR_L) |
		  ((u64)readl(qm->io_base + QM_MB_CMD_DATA_ADDR_H) << 32);
	*base = QM_SQC_VFT_BASE_MASK_V2 & (sqc_vft >> QM_SQC_VFT_BASE_SHIFT_V2);
	*number = (QM_SQC_VFT_NUM_MASK_v2 &
		   (sqc_vft >> QM_SQC_VFT_NUM_SHIFT_V2)) + 1;

	return 0;
}
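
/*
 * The mailbox read above deposits the 64-bit SQC VFT entry into the
 * QM_MB_CMD_DATA_ADDR_L/H registers.  The queue base sits in bits 28-33
 * and (number - 1) in bits 45-54, hence the masks and the +1 when
 * decoding.
 */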

static struct hisi_qm *file_to_qm(struct debugfs_file *file)
{
	struct qm_debug *debug = file->debug;

	return container_of(debug, struct hisi_qm, debug);
}

static u32 current_q_read(struct debugfs_file *file)
{
	struct hisi_qm *qm = file_to_qm(file);

	return readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) >> QM_DFX_QN_SHIFT;
}

static int current_q_write(struct debugfs_file *file, u32 val)
{
	struct hisi_qm *qm = file_to_qm(file);
	u32 tmp;

	if (val >= qm->debug.curr_qm_qp_num)
		return -EINVAL;

	tmp = val << QM_DFX_QN_SHIFT |
	      (readl(qm->io_base + QM_DFX_SQE_CNT_VF_SQN) & CURRENT_FUN_MASK);
	writel(tmp, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);

	tmp = val << QM_DFX_QN_SHIFT |
	      (readl(qm->io_base + QM_DFX_CQE_CNT_VF_CQN) & CURRENT_FUN_MASK);
	writel(tmp, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);

	return 0;
}

static u32 clear_enable_read(struct debugfs_file *file)
{
	struct hisi_qm *qm = file_to_qm(file);

	return readl(qm->io_base + QM_DFX_CNT_CLR_CE);
}

/* rd_clr_ctrl: 1 enables read-clear, 0 disables it */
static int clear_enable_write(struct debugfs_file *file, u32 rd_clr_ctrl)
{
	struct hisi_qm *qm = file_to_qm(file);

	if (rd_clr_ctrl > 1)
		return -EINVAL;

	writel(rd_clr_ctrl, qm->io_base + QM_DFX_CNT_CLR_CE);

	return 0;
}

static ssize_t qm_debug_read(struct file *filp, char __user *buf,
			     size_t count, loff_t *pos)
{
	struct debugfs_file *file = filp->private_data;
	enum qm_debug_file index = file->index;
	char tbuf[QM_DBG_TMP_BUF_LEN];
	u32 val;
	int ret;

	mutex_lock(&file->lock);
	switch (index) {
	case CURRENT_Q:
		val = current_q_read(file);
		break;
	case CLEAR_ENABLE:
		val = clear_enable_read(file);
		break;
	default:
		mutex_unlock(&file->lock);
		return -EINVAL;
	}
	mutex_unlock(&file->lock);
	ret = sprintf(tbuf, "%u\n", val);
	return simple_read_from_buffer(buf, count, pos, tbuf, ret);
}

static ssize_t qm_debug_write(struct file *filp, const char __user *buf,
			      size_t count, loff_t *pos)
{
	struct debugfs_file *file = filp->private_data;
	enum qm_debug_file index = file->index;
	unsigned long val;
	char tbuf[QM_DBG_TMP_BUF_LEN];
	int len, ret;

	if (*pos != 0)
		return 0;

	if (count >= QM_DBG_TMP_BUF_LEN)
		return -ENOSPC;

	len = simple_write_to_buffer(tbuf, QM_DBG_TMP_BUF_LEN - 1, pos, buf,
				     count);
	if (len < 0)
		return len;

	tbuf[len] = '\0';
	if (kstrtoul(tbuf, 0, &val))
		return -EFAULT;

	mutex_lock(&file->lock);
	switch (index) {
	case CURRENT_Q:
		ret = current_q_write(file, val);
		if (ret)
			goto err_input;
		break;
	case CLEAR_ENABLE:
		ret = clear_enable_write(file, val);
		if (ret)
			goto err_input;
		break;
	default:
		ret = -EINVAL;
		goto err_input;
	}
	mutex_unlock(&file->lock);

	return count;

err_input:
	mutex_unlock(&file->lock);
	return ret;
}

static const struct file_operations qm_debug_fops = {
	.owner = THIS_MODULE,
	.open = simple_open,
	.read = qm_debug_read,
	.write = qm_debug_write,
};

struct qm_dfx_registers {
	char  *reg_name;
	u64   reg_offset;
};

#define CNT_CYC_REGS_NUM		10
static struct qm_dfx_registers qm_dfx_regs[] = {
	/* XXX_CNT are read-clear registers */
	{"QM_ECC_1BIT_CNT               ",  0x104000ull},
	{"QM_ECC_MBIT_CNT               ",  0x104008ull},
	{"QM_DFX_MB_CNT                 ",  0x104018ull},
	{"QM_DFX_DB_CNT                 ",  0x104028ull},
	{"QM_DFX_SQE_CNT                ",  0x104038ull},
	{"QM_DFX_CQE_CNT                ",  0x104048ull},
	{"QM_DFX_SEND_SQE_TO_ACC_CNT    ",  0x104050ull},
	{"QM_DFX_WB_SQE_FROM_ACC_CNT    ",  0x104058ull},
	{"QM_DFX_ACC_FINISH_CNT         ",  0x104060ull},
	{"QM_DFX_CQE_ERR_CNT            ",  0x1040b4ull},
	{"QM_DFX_FUNS_ACTIVE_ST         ",  0x200ull},
	{"QM_ECC_1BIT_INF               ",  0x104004ull},
	{"QM_ECC_MBIT_INF               ",  0x10400cull},
	{"QM_DFX_ACC_RDY_VLD0           ",  0x1040a0ull},
	{"QM_DFX_ACC_RDY_VLD1           ",  0x1040a4ull},
	{"QM_DFX_AXI_RDY_VLD            ",  0x1040a8ull},
	{"QM_DFX_FF_ST0                 ",  0x1040c8ull},
	{"QM_DFX_FF_ST1                 ",  0x1040ccull},
	{"QM_DFX_FF_ST2                 ",  0x1040d0ull},
	{"QM_DFX_FF_ST3                 ",  0x1040d4ull},
	{"QM_DFX_FF_ST4                 ",  0x1040d8ull},
	{"QM_DFX_FF_ST5                 ",  0x1040dcull},
	{"QM_DFX_FF_ST6                 ",  0x1040e0ull},
	{"QM_IN_IDLE_ST                 ",  0x1040e4ull},
	{ NULL, 0}
};

static struct qm_dfx_registers qm_vf_dfx_regs[] = {
	{"QM_DFX_FUNS_ACTIVE_ST         ",  0x200ull},
	{ NULL, 0}
};

static int qm_regs_show(struct seq_file *s, void *unused)
{
	struct hisi_qm *qm = s->private;
	struct qm_dfx_registers *regs;
	u32 val;

	if (qm->fun_type == QM_HW_PF)
		regs = qm_dfx_regs;
	else
		regs = qm_vf_dfx_regs;

	while (regs->reg_name) {
		val = readl(qm->io_base + regs->reg_offset);
		seq_printf(s, "%s= 0x%08x\n", regs->reg_name, val);
		regs++;
	}

	return 0;
}

DEFINE_SHOW_ATTRIBUTE(qm_regs);

static ssize_t qm_cmd_read(struct file *filp, char __user *buffer,
			   size_t count, loff_t *pos)
{
	char buf[QM_DBG_READ_LEN];
	int len;

	len = scnprintf(buf, QM_DBG_READ_LEN, "%s\n",
			"Please echo help to cmd to get help information");

	return simple_read_from_buffer(buffer, count, pos, buf, len);
}

static void *qm_ctx_alloc(struct hisi_qm *qm, size_t ctx_size,
			  dma_addr_t *dma_addr)
{
	struct device *dev = &qm->pdev->dev;
	void *ctx_addr;

	ctx_addr = kzalloc(ctx_size, GFP_KERNEL);
	if (!ctx_addr)
		return ERR_PTR(-ENOMEM);

	*dma_addr = dma_map_single(dev, ctx_addr, ctx_size, DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, *dma_addr)) {
		dev_err(dev, "DMA mapping error!\n");
		kfree(ctx_addr);
		return ERR_PTR(-ENOMEM);
	}

	return ctx_addr;
}

static void qm_ctx_free(struct hisi_qm *qm, size_t ctx_size,
			const void *ctx_addr, dma_addr_t *dma_addr)
{
	struct device *dev = &qm->pdev->dev;

	dma_unmap_single(dev, *dma_addr, ctx_size, DMA_FROM_DEVICE);
	kfree(ctx_addr);
}
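
/*
 * The context buffers above are mapped DMA_FROM_DEVICE: the device writes
 * the dumped context into them, and the CPU only reads the result back to
 * display it.
 */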

static int dump_show(struct hisi_qm *qm, void *info,
		     unsigned int info_size, char *info_name)
{
	struct device *dev = &qm->pdev->dev;
	u8 *info_buf, *info_curr = info;
	u32 i;
#define BYTE_PER_DW	4

	info_buf = kzalloc(info_size, GFP_KERNEL);
	if (!info_buf)
		return -ENOMEM;

	for (i = 0; i < info_size; i++, info_curr++) {
		if (i % BYTE_PER_DW == 0)
			info_buf[i + 3UL] = *info_curr;
		else if (i % BYTE_PER_DW == 1)
			info_buf[i + 1UL] = *info_curr;
		else if (i % BYTE_PER_DW == 2)
			info_buf[i - 1] = *info_curr;
		else if (i % BYTE_PER_DW == 3)
			info_buf[i - 3] = *info_curr;
	}

	dev_info(dev, "%s DUMP\n", info_name);
	for (i = 0; i < info_size; i += BYTE_PER_DW) {
		pr_info("DW%d: %02X%02X %02X%02X\n", i / BYTE_PER_DW,
			info_buf[i], info_buf[i + 1UL],
			info_buf[i + 2UL], info_buf[i + 3UL]);
	}

	kfree(info_buf);

	return 0;
}
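
/*
 * Worked example (illustrative): a little-endian dword 0xAABBCCDD sits in
 * memory as DD CC BB AA; the per-dword byte swap above reorders it to
 * AA BB CC DD, so the line prints as "AABB CCDD", most significant byte
 * first.
 */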
1129*4882a593Smuzhiyun 
qm_dump_sqc_raw(struct hisi_qm * qm,dma_addr_t dma_addr,u16 qp_id)1130*4882a593Smuzhiyun static int qm_dump_sqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id)
1131*4882a593Smuzhiyun {
1132*4882a593Smuzhiyun 	return qm_mb(qm, QM_MB_CMD_SQC, dma_addr, qp_id, 1);
1133*4882a593Smuzhiyun }
1134*4882a593Smuzhiyun 
qm_dump_cqc_raw(struct hisi_qm * qm,dma_addr_t dma_addr,u16 qp_id)1135*4882a593Smuzhiyun static int qm_dump_cqc_raw(struct hisi_qm *qm, dma_addr_t dma_addr, u16 qp_id)
1136*4882a593Smuzhiyun {
1137*4882a593Smuzhiyun 	return qm_mb(qm, QM_MB_CMD_CQC, dma_addr, qp_id, 1);
1138*4882a593Smuzhiyun }
1139*4882a593Smuzhiyun 
qm_sqc_dump(struct hisi_qm * qm,const char * s)1140*4882a593Smuzhiyun static int qm_sqc_dump(struct hisi_qm *qm, const char *s)
1141*4882a593Smuzhiyun {
1142*4882a593Smuzhiyun 	struct device *dev = &qm->pdev->dev;
1143*4882a593Smuzhiyun 	struct qm_sqc *sqc, *sqc_curr;
1144*4882a593Smuzhiyun 	dma_addr_t sqc_dma;
1145*4882a593Smuzhiyun 	u32 qp_id;
1146*4882a593Smuzhiyun 	int ret;
1147*4882a593Smuzhiyun 
1148*4882a593Smuzhiyun 	if (!s)
1149*4882a593Smuzhiyun 		return -EINVAL;
1150*4882a593Smuzhiyun 
1151*4882a593Smuzhiyun 	ret = kstrtou32(s, 0, &qp_id);
1152*4882a593Smuzhiyun 	if (ret || qp_id >= qm->qp_num) {
1153*4882a593Smuzhiyun 		dev_err(dev, "Please input qp num (0-%d)", qm->qp_num - 1);
1154*4882a593Smuzhiyun 		return -EINVAL;
1155*4882a593Smuzhiyun 	}
1156*4882a593Smuzhiyun 
1157*4882a593Smuzhiyun 	sqc = qm_ctx_alloc(qm, sizeof(*sqc), &sqc_dma);
1158*4882a593Smuzhiyun 	if (IS_ERR(sqc))
1159*4882a593Smuzhiyun 		return PTR_ERR(sqc);
1160*4882a593Smuzhiyun 
1161*4882a593Smuzhiyun 	ret = qm_dump_sqc_raw(qm, sqc_dma, qp_id);
1162*4882a593Smuzhiyun 	if (ret) {
1163*4882a593Smuzhiyun 		down_read(&qm->qps_lock);
1164*4882a593Smuzhiyun 		if (qm->sqc) {
1165*4882a593Smuzhiyun 			sqc_curr = qm->sqc + qp_id;
1166*4882a593Smuzhiyun 
1167*4882a593Smuzhiyun 			ret = dump_show(qm, sqc_curr, sizeof(*sqc),
1168*4882a593Smuzhiyun 					"SOFT SQC");
1169*4882a593Smuzhiyun 			if (ret)
1170*4882a593Smuzhiyun 				dev_info(dev, "Show soft sqc failed!\n");
1171*4882a593Smuzhiyun 		}
1172*4882a593Smuzhiyun 		up_read(&qm->qps_lock);
1173*4882a593Smuzhiyun 
1174*4882a593Smuzhiyun 		goto err_free_ctx;
1175*4882a593Smuzhiyun 	}
1176*4882a593Smuzhiyun 
1177*4882a593Smuzhiyun 	ret = dump_show(qm, sqc, sizeof(*sqc), "SQC");
1178*4882a593Smuzhiyun 	if (ret)
1179*4882a593Smuzhiyun 		dev_info(dev, "Show hw sqc failed!\n");
1180*4882a593Smuzhiyun 
1181*4882a593Smuzhiyun err_free_ctx:
1182*4882a593Smuzhiyun 	qm_ctx_free(qm, sizeof(*sqc), sqc, &sqc_dma);
1183*4882a593Smuzhiyun 	return ret;
1184*4882a593Smuzhiyun }
1185*4882a593Smuzhiyun 
1186*4882a593Smuzhiyun static int qm_cqc_dump(struct hisi_qm *qm, const char *s)
1187*4882a593Smuzhiyun {
1188*4882a593Smuzhiyun 	struct device *dev = &qm->pdev->dev;
1189*4882a593Smuzhiyun 	struct qm_cqc *cqc, *cqc_curr;
1190*4882a593Smuzhiyun 	dma_addr_t cqc_dma;
1191*4882a593Smuzhiyun 	u32 qp_id;
1192*4882a593Smuzhiyun 	int ret;
1193*4882a593Smuzhiyun 
1194*4882a593Smuzhiyun 	if (!s)
1195*4882a593Smuzhiyun 		return -EINVAL;
1196*4882a593Smuzhiyun 
1197*4882a593Smuzhiyun 	ret = kstrtou32(s, 0, &qp_id);
1198*4882a593Smuzhiyun 	if (ret || qp_id >= qm->qp_num) {
1199*4882a593Smuzhiyun 		dev_err(dev, "Please input qp num (0-%d)", qm->qp_num - 1);
1200*4882a593Smuzhiyun 		return -EINVAL;
1201*4882a593Smuzhiyun 	}
1202*4882a593Smuzhiyun 
1203*4882a593Smuzhiyun 	cqc = qm_ctx_alloc(qm, sizeof(*cqc), &cqc_dma);
1204*4882a593Smuzhiyun 	if (IS_ERR(cqc))
1205*4882a593Smuzhiyun 		return PTR_ERR(cqc);
1206*4882a593Smuzhiyun 
1207*4882a593Smuzhiyun 	ret = qm_dump_cqc_raw(qm, cqc_dma, qp_id);
1208*4882a593Smuzhiyun 	if (ret) {
1209*4882a593Smuzhiyun 		down_read(&qm->qps_lock);
1210*4882a593Smuzhiyun 		if (qm->cqc) {
1211*4882a593Smuzhiyun 			cqc_curr = qm->cqc + qp_id;
1212*4882a593Smuzhiyun 
1213*4882a593Smuzhiyun 			ret = dump_show(qm, cqc_curr, sizeof(*cqc),
1214*4882a593Smuzhiyun 					"SOFT CQC");
1215*4882a593Smuzhiyun 			if (ret)
1216*4882a593Smuzhiyun 				dev_info(dev, "Show soft cqc failed!\n");
1217*4882a593Smuzhiyun 		}
1218*4882a593Smuzhiyun 		up_read(&qm->qps_lock);
1219*4882a593Smuzhiyun 
1220*4882a593Smuzhiyun 		goto err_free_ctx;
1221*4882a593Smuzhiyun 	}
1222*4882a593Smuzhiyun 
1223*4882a593Smuzhiyun 	ret = dump_show(qm, cqc, sizeof(*cqc), "CQC");
1224*4882a593Smuzhiyun 	if (ret)
1225*4882a593Smuzhiyun 		dev_info(dev, "Show hw cqc failed!\n");
1226*4882a593Smuzhiyun 
1227*4882a593Smuzhiyun err_free_ctx:
1228*4882a593Smuzhiyun 	qm_ctx_free(qm, sizeof(*cqc), cqc, &cqc_dma);
1229*4882a593Smuzhiyun 	return ret;
1230*4882a593Smuzhiyun }
1231*4882a593Smuzhiyun 
1232*4882a593Smuzhiyun static int qm_eqc_aeqc_dump(struct hisi_qm *qm, char *s, size_t size,
1233*4882a593Smuzhiyun 			    int cmd, char *name)
1234*4882a593Smuzhiyun {
1235*4882a593Smuzhiyun 	struct device *dev = &qm->pdev->dev;
1236*4882a593Smuzhiyun 	dma_addr_t xeqc_dma;
1237*4882a593Smuzhiyun 	void *xeqc;
1238*4882a593Smuzhiyun 	int ret;
1239*4882a593Smuzhiyun 
1240*4882a593Smuzhiyun 	if (strsep(&s, " ")) {
1241*4882a593Smuzhiyun 		dev_err(dev, "Please do not input extra characters!\n");
1242*4882a593Smuzhiyun 		return -EINVAL;
1243*4882a593Smuzhiyun 	}
1244*4882a593Smuzhiyun 
1245*4882a593Smuzhiyun 	xeqc = qm_ctx_alloc(qm, size, &xeqc_dma);
1246*4882a593Smuzhiyun 	if (IS_ERR(xeqc))
1247*4882a593Smuzhiyun 		return PTR_ERR(xeqc);
1248*4882a593Smuzhiyun 
1249*4882a593Smuzhiyun 	ret = qm_mb(qm, cmd, xeqc_dma, 0, 1);
1250*4882a593Smuzhiyun 	if (ret)
1251*4882a593Smuzhiyun 		goto err_free_ctx;
1252*4882a593Smuzhiyun 
1253*4882a593Smuzhiyun 	ret = dump_show(qm, xeqc, size, name);
1254*4882a593Smuzhiyun 	if (ret)
1255*4882a593Smuzhiyun 		dev_info(dev, "Show hw %s failed!\n", name);
1256*4882a593Smuzhiyun 
1257*4882a593Smuzhiyun err_free_ctx:
1258*4882a593Smuzhiyun 	qm_ctx_free(qm, size, xeqc, &xeqc_dma);
1259*4882a593Smuzhiyun 	return ret;
1260*4882a593Smuzhiyun }
1261*4882a593Smuzhiyun 
1262*4882a593Smuzhiyun static int q_dump_param_parse(struct hisi_qm *qm, char *s,
1263*4882a593Smuzhiyun 			      u32 *e_id, u32 *q_id)
1264*4882a593Smuzhiyun {
1265*4882a593Smuzhiyun 	struct device *dev = &qm->pdev->dev;
1266*4882a593Smuzhiyun 	unsigned int qp_num = qm->qp_num;
1267*4882a593Smuzhiyun 	char *presult;
1268*4882a593Smuzhiyun 	int ret;
1269*4882a593Smuzhiyun 
1270*4882a593Smuzhiyun 	presult = strsep(&s, " ");
1271*4882a593Smuzhiyun 	if (!presult) {
1272*4882a593Smuzhiyun 		dev_err(dev, "Please input qp number!\n");
1273*4882a593Smuzhiyun 		return -EINVAL;
1274*4882a593Smuzhiyun 	}
1275*4882a593Smuzhiyun 
1276*4882a593Smuzhiyun 	ret = kstrtou32(presult, 0, q_id);
1277*4882a593Smuzhiyun 	if (ret || *q_id >= qp_num) {
1278*4882a593Smuzhiyun 		dev_err(dev, "Please input qp num (0-%d)", qp_num - 1);
1279*4882a593Smuzhiyun 		return -EINVAL;
1280*4882a593Smuzhiyun 	}
1281*4882a593Smuzhiyun 
1282*4882a593Smuzhiyun 	presult = strsep(&s, " ");
1283*4882a593Smuzhiyun 	if (!presult) {
1284*4882a593Smuzhiyun 		dev_err(dev, "Please input sqe number!\n");
1285*4882a593Smuzhiyun 		return -EINVAL;
1286*4882a593Smuzhiyun 	}
1287*4882a593Smuzhiyun 
1288*4882a593Smuzhiyun 	ret = kstrtou32(presult, 0, e_id);
1289*4882a593Smuzhiyun 	if (ret || *e_id >= QM_Q_DEPTH) {
1290*4882a593Smuzhiyun 		dev_err(dev, "Please input sqe num (0-%d)", QM_Q_DEPTH - 1);
1291*4882a593Smuzhiyun 		return -EINVAL;
1292*4882a593Smuzhiyun 	}
1293*4882a593Smuzhiyun 
1294*4882a593Smuzhiyun 	if (strsep(&s, " ")) {
1295*4882a593Smuzhiyun 		dev_err(dev, "Please do not input extra characters!\n");
1296*4882a593Smuzhiyun 		return -EINVAL;
1297*4882a593Smuzhiyun 	}
1298*4882a593Smuzhiyun 
1299*4882a593Smuzhiyun 	return 0;
1300*4882a593Smuzhiyun }
1301*4882a593Smuzhiyun 
1302*4882a593Smuzhiyun static int qm_sq_dump(struct hisi_qm *qm, char *s)
1303*4882a593Smuzhiyun {
1304*4882a593Smuzhiyun 	struct device *dev = &qm->pdev->dev;
1305*4882a593Smuzhiyun 	void *sqe, *sqe_curr;
1306*4882a593Smuzhiyun 	struct hisi_qp *qp;
1307*4882a593Smuzhiyun 	u32 qp_id, sqe_id;
1308*4882a593Smuzhiyun 	int ret;
1309*4882a593Smuzhiyun 
1310*4882a593Smuzhiyun 	ret = q_dump_param_parse(qm, s, &sqe_id, &qp_id);
1311*4882a593Smuzhiyun 	if (ret)
1312*4882a593Smuzhiyun 		return ret;
1313*4882a593Smuzhiyun 
1314*4882a593Smuzhiyun 	sqe = kzalloc(qm->sqe_size * QM_Q_DEPTH, GFP_KERNEL);
1315*4882a593Smuzhiyun 	if (!sqe)
1316*4882a593Smuzhiyun 		return -ENOMEM;
1317*4882a593Smuzhiyun 
1318*4882a593Smuzhiyun 	qp = &qm->qp_array[qp_id];
1319*4882a593Smuzhiyun 	memcpy(sqe, qp->sqe, qm->sqe_size * QM_Q_DEPTH);
1320*4882a593Smuzhiyun 	sqe_curr = sqe + (u32)(sqe_id * qm->sqe_size);
1321*4882a593Smuzhiyun 	memset(sqe_curr + qm->debug.sqe_mask_offset, QM_SQE_ADDR_MASK,
1322*4882a593Smuzhiyun 	       qm->debug.sqe_mask_len);
1323*4882a593Smuzhiyun 
1324*4882a593Smuzhiyun 	ret = dump_show(qm, sqe_curr, qm->sqe_size, "SQE");
1325*4882a593Smuzhiyun 	if (ret)
1326*4882a593Smuzhiyun 		dev_info(dev, "Show sqe failed!\n");
1327*4882a593Smuzhiyun 
1328*4882a593Smuzhiyun 	kfree(sqe);
1329*4882a593Smuzhiyun 
1330*4882a593Smuzhiyun 	return ret;
1331*4882a593Smuzhiyun }
1332*4882a593Smuzhiyun 
1333*4882a593Smuzhiyun static int qm_cq_dump(struct hisi_qm *qm, char *s)
1334*4882a593Smuzhiyun {
1335*4882a593Smuzhiyun 	struct device *dev = &qm->pdev->dev;
1336*4882a593Smuzhiyun 	struct qm_cqe *cqe_curr;
1337*4882a593Smuzhiyun 	struct hisi_qp *qp;
1338*4882a593Smuzhiyun 	u32 qp_id, cqe_id;
1339*4882a593Smuzhiyun 	int ret;
1340*4882a593Smuzhiyun 
1341*4882a593Smuzhiyun 	ret = q_dump_param_parse(qm, s, &cqe_id, &qp_id);
1342*4882a593Smuzhiyun 	if (ret)
1343*4882a593Smuzhiyun 		return ret;
1344*4882a593Smuzhiyun 
1345*4882a593Smuzhiyun 	qp = &qm->qp_array[qp_id];
1346*4882a593Smuzhiyun 	cqe_curr = qp->cqe + cqe_id;
1347*4882a593Smuzhiyun 	ret = dump_show(qm, cqe_curr, sizeof(struct qm_cqe), "CQE");
1348*4882a593Smuzhiyun 	if (ret)
1349*4882a593Smuzhiyun 		dev_info(dev, "Show cqe failed!\n");
1350*4882a593Smuzhiyun 
1351*4882a593Smuzhiyun 	return ret;
1352*4882a593Smuzhiyun }
1353*4882a593Smuzhiyun 
1354*4882a593Smuzhiyun static int qm_eq_aeq_dump(struct hisi_qm *qm, const char *s,
1355*4882a593Smuzhiyun 			  size_t size, char *name)
1356*4882a593Smuzhiyun {
1357*4882a593Smuzhiyun 	struct device *dev = &qm->pdev->dev;
1358*4882a593Smuzhiyun 	void *xeqe;
1359*4882a593Smuzhiyun 	u32 xeqe_id;
1360*4882a593Smuzhiyun 	int ret;
1361*4882a593Smuzhiyun 
1362*4882a593Smuzhiyun 	if (!s)
1363*4882a593Smuzhiyun 		return -EINVAL;
1364*4882a593Smuzhiyun 
1365*4882a593Smuzhiyun 	ret = kstrtou32(s, 0, &xeqe_id);
1366*4882a593Smuzhiyun 	if (ret)
1367*4882a593Smuzhiyun 		return -EINVAL;
1368*4882a593Smuzhiyun 
1369*4882a593Smuzhiyun 	if (!strcmp(name, "EQE") && xeqe_id >= QM_EQ_DEPTH) {
1370*4882a593Smuzhiyun 		dev_err(dev, "Please input eqe num (0-%d)", QM_EQ_DEPTH - 1);
1371*4882a593Smuzhiyun 		return -EINVAL;
1372*4882a593Smuzhiyun 	} else if (!strcmp(name, "AEQE") && xeqe_id >= QM_Q_DEPTH) {
1373*4882a593Smuzhiyun 		dev_err(dev, "Please input aeqe num (0-%d)", QM_Q_DEPTH - 1);
1374*4882a593Smuzhiyun 		return -EINVAL;
1375*4882a593Smuzhiyun 	}
1376*4882a593Smuzhiyun 
1377*4882a593Smuzhiyun 	down_read(&qm->qps_lock);
1378*4882a593Smuzhiyun 
1379*4882a593Smuzhiyun 	if (qm->eqe && !strcmp(name, "EQE")) {
1380*4882a593Smuzhiyun 		xeqe = qm->eqe + xeqe_id;
1381*4882a593Smuzhiyun 	} else if (qm->aeqe && !strcmp(name, "AEQE")) {
1382*4882a593Smuzhiyun 		xeqe = qm->aeqe + xeqe_id;
1383*4882a593Smuzhiyun 	} else {
1384*4882a593Smuzhiyun 		ret = -EINVAL;
1385*4882a593Smuzhiyun 		goto err_unlock;
1386*4882a593Smuzhiyun 	}
1387*4882a593Smuzhiyun 
1388*4882a593Smuzhiyun 	ret = dump_show(qm, xeqe, size, name);
1389*4882a593Smuzhiyun 	if (ret)
1390*4882a593Smuzhiyun 		dev_info(dev, "Show %s failed!\n", name);
1391*4882a593Smuzhiyun 
1392*4882a593Smuzhiyun err_unlock:
1393*4882a593Smuzhiyun 	up_read(&qm->qps_lock);
1394*4882a593Smuzhiyun 	return ret;
1395*4882a593Smuzhiyun }
1396*4882a593Smuzhiyun 
1397*4882a593Smuzhiyun static int qm_dbg_help(struct hisi_qm *qm, char *s)
1398*4882a593Smuzhiyun {
1399*4882a593Smuzhiyun 	struct device *dev = &qm->pdev->dev;
1400*4882a593Smuzhiyun 
1401*4882a593Smuzhiyun 	if (strsep(&s, " ")) {
1402*4882a593Smuzhiyun 		dev_err(dev, "Please do not input extra characters!\n");
1403*4882a593Smuzhiyun 		return -EINVAL;
1404*4882a593Smuzhiyun 	}
1405*4882a593Smuzhiyun 
1406*4882a593Smuzhiyun 	dev_info(dev, "available commands:\n");
1407*4882a593Smuzhiyun 	dev_info(dev, "sqc <num>\n");
1408*4882a593Smuzhiyun 	dev_info(dev, "cqc <num>\n");
1409*4882a593Smuzhiyun 	dev_info(dev, "eqc\n");
1410*4882a593Smuzhiyun 	dev_info(dev, "aeqc\n");
1411*4882a593Smuzhiyun 	dev_info(dev, "sq <num> <e>\n");
1412*4882a593Smuzhiyun 	dev_info(dev, "cq <num> <e>\n");
1413*4882a593Smuzhiyun 	dev_info(dev, "eq <e>\n");
1414*4882a593Smuzhiyun 	dev_info(dev, "aeq <e>\n");
1415*4882a593Smuzhiyun 
1416*4882a593Smuzhiyun 	return 0;
1417*4882a593Smuzhiyun }
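
/*
 * Illustrative use from user space (the exact debugfs path depends on the
 * accelerator driver that registered this QM, so the path below is only an
 * assumption for the example):
 *
 *   echo "sqc 0" > /sys/kernel/debug/<dev>/qm/cmd
 *   dmesg | tail	# the dumped context is printed via dev_info()
 */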
1418*4882a593Smuzhiyun 
1419*4882a593Smuzhiyun static int qm_cmd_write_dump(struct hisi_qm *qm, const char *cmd_buf)
1420*4882a593Smuzhiyun {
1421*4882a593Smuzhiyun 	struct device *dev = &qm->pdev->dev;
1422*4882a593Smuzhiyun 	char *presult, *s, *s_tmp;
1423*4882a593Smuzhiyun 	int ret;
1424*4882a593Smuzhiyun 
1425*4882a593Smuzhiyun 	s = kstrdup(cmd_buf, GFP_KERNEL);
1426*4882a593Smuzhiyun 	if (!s)
1427*4882a593Smuzhiyun 		return -ENOMEM;
1428*4882a593Smuzhiyun 
1429*4882a593Smuzhiyun 	s_tmp = s;
1430*4882a593Smuzhiyun 	presult = strsep(&s, " ");
1431*4882a593Smuzhiyun 	if (!presult) {
1432*4882a593Smuzhiyun 		ret = -EINVAL;
1433*4882a593Smuzhiyun 		goto err_buffer_free;
1434*4882a593Smuzhiyun 	}
1435*4882a593Smuzhiyun 
1436*4882a593Smuzhiyun 	if (!strcmp(presult, "sqc"))
1437*4882a593Smuzhiyun 		ret = qm_sqc_dump(qm, s);
1438*4882a593Smuzhiyun 	else if (!strcmp(presult, "cqc"))
1439*4882a593Smuzhiyun 		ret = qm_cqc_dump(qm, s);
1440*4882a593Smuzhiyun 	else if (!strcmp(presult, "eqc"))
1441*4882a593Smuzhiyun 		ret = qm_eqc_aeqc_dump(qm, s, sizeof(struct qm_eqc),
1442*4882a593Smuzhiyun 				       QM_MB_CMD_EQC, "EQC");
1443*4882a593Smuzhiyun 	else if (!strcmp(presult, "aeqc"))
1444*4882a593Smuzhiyun 		ret = qm_eqc_aeqc_dump(qm, s, sizeof(struct qm_aeqc),
1445*4882a593Smuzhiyun 				       QM_MB_CMD_AEQC, "AEQC");
1446*4882a593Smuzhiyun 	else if (!strcmp(presult, "sq"))
1447*4882a593Smuzhiyun 		ret = qm_sq_dump(qm, s);
1448*4882a593Smuzhiyun 	else if (!strcmp(presult, "cq"))
1449*4882a593Smuzhiyun 		ret = qm_cq_dump(qm, s);
1450*4882a593Smuzhiyun 	else if (!strcmp(presult, "eq"))
1451*4882a593Smuzhiyun 		ret = qm_eq_aeq_dump(qm, s, sizeof(struct qm_eqe), "EQE");
1452*4882a593Smuzhiyun 	else if (!strcmp(presult, "aeq"))
1453*4882a593Smuzhiyun 		ret = qm_eq_aeq_dump(qm, s, sizeof(struct qm_aeqe), "AEQE");
1454*4882a593Smuzhiyun 	else if (!strcmp(presult, "help"))
1455*4882a593Smuzhiyun 		ret = qm_dbg_help(qm, s);
1456*4882a593Smuzhiyun 	else
1457*4882a593Smuzhiyun 		ret = -EINVAL;
1458*4882a593Smuzhiyun 
1459*4882a593Smuzhiyun 	if (ret)
1460*4882a593Smuzhiyun 		dev_info(dev, "Please echo help\n");
1461*4882a593Smuzhiyun 
1462*4882a593Smuzhiyun err_buffer_free:
1463*4882a593Smuzhiyun 	kfree(s_tmp);
1464*4882a593Smuzhiyun 
1465*4882a593Smuzhiyun 	return ret;
1466*4882a593Smuzhiyun }
1467*4882a593Smuzhiyun 
1468*4882a593Smuzhiyun static ssize_t qm_cmd_write(struct file *filp, const char __user *buffer,
1469*4882a593Smuzhiyun 			    size_t count, loff_t *pos)
1470*4882a593Smuzhiyun {
1471*4882a593Smuzhiyun 	struct hisi_qm *qm = filp->private_data;
1472*4882a593Smuzhiyun 	char *cmd_buf, *cmd_buf_tmp;
1473*4882a593Smuzhiyun 	int ret;
1474*4882a593Smuzhiyun 
1475*4882a593Smuzhiyun 	if (*pos)
1476*4882a593Smuzhiyun 		return 0;
1477*4882a593Smuzhiyun 
1478*4882a593Smuzhiyun 	/* Check whether the instance is being reset. */
1479*4882a593Smuzhiyun 	if (unlikely(atomic_read(&qm->status.flags) == QM_STOP))
1480*4882a593Smuzhiyun 		return 0;
1481*4882a593Smuzhiyun 
1482*4882a593Smuzhiyun 	if (count > QM_DBG_WRITE_LEN)
1483*4882a593Smuzhiyun 		return -ENOSPC;
1484*4882a593Smuzhiyun 
1485*4882a593Smuzhiyun 	cmd_buf = kzalloc(count + 1, GFP_KERNEL);
1486*4882a593Smuzhiyun 	if (!cmd_buf)
1487*4882a593Smuzhiyun 		return -ENOMEM;
1488*4882a593Smuzhiyun 
1489*4882a593Smuzhiyun 	if (copy_from_user(cmd_buf, buffer, count)) {
1490*4882a593Smuzhiyun 		kfree(cmd_buf);
1491*4882a593Smuzhiyun 		return -EFAULT;
1492*4882a593Smuzhiyun 	}
1493*4882a593Smuzhiyun 
1494*4882a593Smuzhiyun 	cmd_buf[count] = '\0';
1495*4882a593Smuzhiyun 
1496*4882a593Smuzhiyun 	cmd_buf_tmp = strchr(cmd_buf, '\n');
1497*4882a593Smuzhiyun 	if (cmd_buf_tmp) {
1498*4882a593Smuzhiyun 		*cmd_buf_tmp = '\0';
1499*4882a593Smuzhiyun 		count = cmd_buf_tmp - cmd_buf + 1;
1500*4882a593Smuzhiyun 	}
1501*4882a593Smuzhiyun 
1502*4882a593Smuzhiyun 	ret = qm_cmd_write_dump(qm, cmd_buf);
1503*4882a593Smuzhiyun 	if (ret) {
1504*4882a593Smuzhiyun 		kfree(cmd_buf);
1505*4882a593Smuzhiyun 		return ret;
1506*4882a593Smuzhiyun 	}
1507*4882a593Smuzhiyun 
1508*4882a593Smuzhiyun 	kfree(cmd_buf);
1509*4882a593Smuzhiyun 
1510*4882a593Smuzhiyun 	return count;
1511*4882a593Smuzhiyun }
1512*4882a593Smuzhiyun 
1513*4882a593Smuzhiyun static const struct file_operations qm_cmd_fops = {
1514*4882a593Smuzhiyun 	.owner = THIS_MODULE,
1515*4882a593Smuzhiyun 	.open = simple_open,
1516*4882a593Smuzhiyun 	.read = qm_cmd_read,
1517*4882a593Smuzhiyun 	.write = qm_cmd_write,
1518*4882a593Smuzhiyun };
1519*4882a593Smuzhiyun 
1520*4882a593Smuzhiyun static int qm_create_debugfs_file(struct hisi_qm *qm, enum qm_debug_file index)
1521*4882a593Smuzhiyun {
1522*4882a593Smuzhiyun 	struct dentry *qm_d = qm->debug.qm_d;
1523*4882a593Smuzhiyun 	struct debugfs_file *file = qm->debug.files + index;
1524*4882a593Smuzhiyun 
1525*4882a593Smuzhiyun 	debugfs_create_file(qm_debug_file_name[index], 0600, qm_d, file,
1526*4882a593Smuzhiyun 			    &qm_debug_fops);
1527*4882a593Smuzhiyun 
1528*4882a593Smuzhiyun 	file->index = index;
1529*4882a593Smuzhiyun 	mutex_init(&file->lock);
1530*4882a593Smuzhiyun 	file->debug = &qm->debug;
1531*4882a593Smuzhiyun 
1532*4882a593Smuzhiyun 	return 0;
1533*4882a593Smuzhiyun }
1534*4882a593Smuzhiyun 
1535*4882a593Smuzhiyun static void qm_hw_error_init_v1(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
1536*4882a593Smuzhiyun {
1537*4882a593Smuzhiyun 	writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
1538*4882a593Smuzhiyun }
1539*4882a593Smuzhiyun 
1540*4882a593Smuzhiyun static void qm_hw_error_init_v2(struct hisi_qm *qm, u32 ce, u32 nfe, u32 fe)
1541*4882a593Smuzhiyun {
1542*4882a593Smuzhiyun 	u32 irq_enable = ce | nfe | fe;
1543*4882a593Smuzhiyun 	u32 irq_unmask = ~irq_enable;
1544*4882a593Smuzhiyun 
1545*4882a593Smuzhiyun 	qm->error_mask = ce | nfe | fe;
1546*4882a593Smuzhiyun 
1547*4882a593Smuzhiyun 	/* clear QM hw residual error source */
1548*4882a593Smuzhiyun 	writel(QM_ABNORMAL_INT_SOURCE_CLR,
1549*4882a593Smuzhiyun 	       qm->io_base + QM_ABNORMAL_INT_SOURCE);
1550*4882a593Smuzhiyun 
1551*4882a593Smuzhiyun 	/* configure error type */
1552*4882a593Smuzhiyun 	writel(ce, qm->io_base + QM_RAS_CE_ENABLE);
1553*4882a593Smuzhiyun 	writel(QM_RAS_CE_TIMES_PER_IRQ, qm->io_base + QM_RAS_CE_THRESHOLD);
1554*4882a593Smuzhiyun 	writel(nfe, qm->io_base + QM_RAS_NFE_ENABLE);
1555*4882a593Smuzhiyun 	writel(fe, qm->io_base + QM_RAS_FE_ENABLE);
1556*4882a593Smuzhiyun 
1557*4882a593Smuzhiyun 	irq_unmask &= readl(qm->io_base + QM_ABNORMAL_INT_MASK);
1558*4882a593Smuzhiyun 	writel(irq_unmask, qm->io_base + QM_ABNORMAL_INT_MASK);
1559*4882a593Smuzhiyun }
1560*4882a593Smuzhiyun 
1561*4882a593Smuzhiyun static void qm_hw_error_uninit_v2(struct hisi_qm *qm)
1562*4882a593Smuzhiyun {
1563*4882a593Smuzhiyun 	writel(QM_ABNORMAL_INT_MASK_VALUE, qm->io_base + QM_ABNORMAL_INT_MASK);
1564*4882a593Smuzhiyun }
1565*4882a593Smuzhiyun 
1566*4882a593Smuzhiyun static void qm_log_hw_error(struct hisi_qm *qm, u32 error_status)
1567*4882a593Smuzhiyun {
1568*4882a593Smuzhiyun 	const struct hisi_qm_hw_error *err;
1569*4882a593Smuzhiyun 	struct device *dev = &qm->pdev->dev;
1570*4882a593Smuzhiyun 	u32 reg_val, type, vf_num;
1571*4882a593Smuzhiyun 	int i;
1572*4882a593Smuzhiyun 
1573*4882a593Smuzhiyun 	for (i = 0; i < ARRAY_SIZE(qm_hw_error); i++) {
1574*4882a593Smuzhiyun 		err = &qm_hw_error[i];
1575*4882a593Smuzhiyun 		if (!(err->int_msk & error_status))
1576*4882a593Smuzhiyun 			continue;
1577*4882a593Smuzhiyun 
1578*4882a593Smuzhiyun 		dev_err(dev, "%s [error status=0x%x] found\n",
1579*4882a593Smuzhiyun 			err->msg, err->int_msk);
1580*4882a593Smuzhiyun 
1581*4882a593Smuzhiyun 		if (err->int_msk & QM_DB_TIMEOUT) {
1582*4882a593Smuzhiyun 			reg_val = readl(qm->io_base + QM_ABNORMAL_INF01);
1583*4882a593Smuzhiyun 			type = (reg_val & QM_DB_TIMEOUT_TYPE) >>
1584*4882a593Smuzhiyun 			       QM_DB_TIMEOUT_TYPE_SHIFT;
1585*4882a593Smuzhiyun 			vf_num = reg_val & QM_DB_TIMEOUT_VF;
1586*4882a593Smuzhiyun 			dev_err(dev, "qm %s doorbell timeout in function %u\n",
1587*4882a593Smuzhiyun 				qm_db_timeout[type], vf_num);
1588*4882a593Smuzhiyun 		} else if (err->int_msk & QM_OF_FIFO_OF) {
1589*4882a593Smuzhiyun 			reg_val = readl(qm->io_base + QM_ABNORMAL_INF00);
1590*4882a593Smuzhiyun 			type = (reg_val & QM_FIFO_OVERFLOW_TYPE) >>
1591*4882a593Smuzhiyun 			       QM_FIFO_OVERFLOW_TYPE_SHIFT;
1592*4882a593Smuzhiyun 			vf_num = reg_val & QM_FIFO_OVERFLOW_VF;
1593*4882a593Smuzhiyun 
1594*4882a593Smuzhiyun 			if (type < ARRAY_SIZE(qm_fifo_overflow))
1595*4882a593Smuzhiyun 				dev_err(dev, "qm %s fifo overflow in function %u\n",
1596*4882a593Smuzhiyun 					qm_fifo_overflow[type], vf_num);
1597*4882a593Smuzhiyun 			else
1598*4882a593Smuzhiyun 				dev_err(dev, "unknown error type\n");
1599*4882a593Smuzhiyun 		}
1600*4882a593Smuzhiyun 	}
1601*4882a593Smuzhiyun }
1602*4882a593Smuzhiyun 
1603*4882a593Smuzhiyun static enum acc_err_result qm_hw_error_handle_v2(struct hisi_qm *qm)
1604*4882a593Smuzhiyun {
1605*4882a593Smuzhiyun 	u32 error_status, tmp;
1606*4882a593Smuzhiyun 
1607*4882a593Smuzhiyun 	/* read err sts */
1608*4882a593Smuzhiyun 	tmp = readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
1609*4882a593Smuzhiyun 	error_status = qm->error_mask & tmp;
1610*4882a593Smuzhiyun 
1611*4882a593Smuzhiyun 	if (error_status) {
1612*4882a593Smuzhiyun 		if (error_status & QM_ECC_MBIT)
1613*4882a593Smuzhiyun 			qm->err_status.is_qm_ecc_mbit = true;
1614*4882a593Smuzhiyun 
1615*4882a593Smuzhiyun 		qm_log_hw_error(qm, error_status);
1616*4882a593Smuzhiyun 		if (error_status == QM_DB_RANDOM_INVALID) {
1617*4882a593Smuzhiyun 			writel(error_status, qm->io_base +
1618*4882a593Smuzhiyun 			       QM_ABNORMAL_INT_SOURCE);
1619*4882a593Smuzhiyun 			return ACC_ERR_RECOVERED;
1620*4882a593Smuzhiyun 		}
1621*4882a593Smuzhiyun 
1622*4882a593Smuzhiyun 		return ACC_ERR_NEED_RESET;
1623*4882a593Smuzhiyun 	}
1624*4882a593Smuzhiyun 
1625*4882a593Smuzhiyun 	return ACC_ERR_RECOVERED;
1626*4882a593Smuzhiyun }
1627*4882a593Smuzhiyun 
1628*4882a593Smuzhiyun static const struct hisi_qm_hw_ops qm_hw_ops_v1 = {
1629*4882a593Smuzhiyun 	.qm_db = qm_db_v1,
1630*4882a593Smuzhiyun 	.get_irq_num = qm_get_irq_num_v1,
1631*4882a593Smuzhiyun 	.hw_error_init = qm_hw_error_init_v1,
1632*4882a593Smuzhiyun };
1633*4882a593Smuzhiyun 
1634*4882a593Smuzhiyun static const struct hisi_qm_hw_ops qm_hw_ops_v2 = {
1635*4882a593Smuzhiyun 	.get_vft = qm_get_vft_v2,
1636*4882a593Smuzhiyun 	.qm_db = qm_db_v2,
1637*4882a593Smuzhiyun 	.get_irq_num = qm_get_irq_num_v2,
1638*4882a593Smuzhiyun 	.hw_error_init = qm_hw_error_init_v2,
1639*4882a593Smuzhiyun 	.hw_error_uninit = qm_hw_error_uninit_v2,
1640*4882a593Smuzhiyun 	.hw_error_handle = qm_hw_error_handle_v2,
1641*4882a593Smuzhiyun };
1642*4882a593Smuzhiyun 
1643*4882a593Smuzhiyun static void *qm_get_avail_sqe(struct hisi_qp *qp)
1644*4882a593Smuzhiyun {
1645*4882a593Smuzhiyun 	struct hisi_qp_status *qp_status = &qp->qp_status;
1646*4882a593Smuzhiyun 	u16 sq_tail = qp_status->sq_tail;
1647*4882a593Smuzhiyun 
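	/* treat the sq as full one entry early: QM_Q_DEPTH - 1 in flight */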
1648*4882a593Smuzhiyun 	if (unlikely(atomic_read(&qp->qp_status.used) == QM_Q_DEPTH - 1))
1649*4882a593Smuzhiyun 		return NULL;
1650*4882a593Smuzhiyun 
1651*4882a593Smuzhiyun 	return qp->sqe + sq_tail * qp->qm->sqe_size;
1652*4882a593Smuzhiyun }
1653*4882a593Smuzhiyun 
1654*4882a593Smuzhiyun static struct hisi_qp *qm_create_qp_nolock(struct hisi_qm *qm, u8 alg_type)
1655*4882a593Smuzhiyun {
1656*4882a593Smuzhiyun 	struct device *dev = &qm->pdev->dev;
1657*4882a593Smuzhiyun 	struct hisi_qp *qp;
1658*4882a593Smuzhiyun 	int qp_id;
1659*4882a593Smuzhiyun 
1660*4882a593Smuzhiyun 	if (!qm_qp_avail_state(qm, NULL, QP_INIT))
1661*4882a593Smuzhiyun 		return ERR_PTR(-EPERM);
1662*4882a593Smuzhiyun 
1663*4882a593Smuzhiyun 	if (qm->qp_in_used == qm->qp_num) {
1664*4882a593Smuzhiyun 		dev_info_ratelimited(dev, "All %u queues of QM are busy!\n",
1665*4882a593Smuzhiyun 				     qm->qp_num);
1666*4882a593Smuzhiyun 		atomic64_inc(&qm->debug.dfx.create_qp_err_cnt);
1667*4882a593Smuzhiyun 		return ERR_PTR(-EBUSY);
1668*4882a593Smuzhiyun 	}
1669*4882a593Smuzhiyun 
1670*4882a593Smuzhiyun 	qp_id = idr_alloc_cyclic(&qm->qp_idr, NULL, 0, qm->qp_num, GFP_ATOMIC);
1671*4882a593Smuzhiyun 	if (qp_id < 0) {
1672*4882a593Smuzhiyun 		dev_info_ratelimited(dev, "All %u queues of QM are busy!\n",
1673*4882a593Smuzhiyun 				     qm->qp_num);
1674*4882a593Smuzhiyun 		atomic64_inc(&qm->debug.dfx.create_qp_err_cnt);
1675*4882a593Smuzhiyun 		return ERR_PTR(-EBUSY);
1676*4882a593Smuzhiyun 	}
1677*4882a593Smuzhiyun 
1678*4882a593Smuzhiyun 	qp = &qm->qp_array[qp_id];
1679*4882a593Smuzhiyun 
1680*4882a593Smuzhiyun 	memset(qp->cqe, 0, sizeof(struct qm_cqe) * QM_Q_DEPTH);
1681*4882a593Smuzhiyun 
1682*4882a593Smuzhiyun 	qp->event_cb = NULL;
1683*4882a593Smuzhiyun 	qp->req_cb = NULL;
1684*4882a593Smuzhiyun 	qp->qp_id = qp_id;
1685*4882a593Smuzhiyun 	qp->alg_type = alg_type;
1686*4882a593Smuzhiyun 	qm->qp_in_used++;
1687*4882a593Smuzhiyun 	atomic_set(&qp->qp_status.flags, QP_INIT);
1688*4882a593Smuzhiyun 
1689*4882a593Smuzhiyun 	return qp;
1690*4882a593Smuzhiyun }
1691*4882a593Smuzhiyun 
1692*4882a593Smuzhiyun /**
1693*4882a593Smuzhiyun  * hisi_qm_create_qp() - Create a queue pair from qm.
1694*4882a593Smuzhiyun  * @qm: The qm we create a qp from.
1695*4882a593Smuzhiyun  * @alg_type: Accelerator specific algorithm type in sqc.
1696*4882a593Smuzhiyun  *
1697*4882a593Smuzhiyun  * Return the created qp, -EBUSY if all qps in the qm are allocated, or
1698*4882a593Smuzhiyun  * -ENOMEM if allocating qp memory fails.
1699*4882a593Smuzhiyun  */
1700*4882a593Smuzhiyun struct hisi_qp *hisi_qm_create_qp(struct hisi_qm *qm, u8 alg_type)
1701*4882a593Smuzhiyun {
1702*4882a593Smuzhiyun 	struct hisi_qp *qp;
1703*4882a593Smuzhiyun 
1704*4882a593Smuzhiyun 	down_write(&qm->qps_lock);
1705*4882a593Smuzhiyun 	qp = qm_create_qp_nolock(qm, alg_type);
1706*4882a593Smuzhiyun 	up_write(&qm->qps_lock);
1707*4882a593Smuzhiyun 
1708*4882a593Smuzhiyun 	return qp;
1709*4882a593Smuzhiyun }
1710*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(hisi_qm_create_qp);
1711*4882a593Smuzhiyun 
1712*4882a593Smuzhiyun /**
1713*4882a593Smuzhiyun  * hisi_qm_release_qp() - Release a qp back to its qm.
1714*4882a593Smuzhiyun  * @qp: The qp we want to release.
1715*4882a593Smuzhiyun  *
1716*4882a593Smuzhiyun  * This function releases the resource of a qp.
1717*4882a593Smuzhiyun  */
1718*4882a593Smuzhiyun void hisi_qm_release_qp(struct hisi_qp *qp)
1719*4882a593Smuzhiyun {
1720*4882a593Smuzhiyun 	struct hisi_qm *qm = qp->qm;
1721*4882a593Smuzhiyun 
1722*4882a593Smuzhiyun 	down_write(&qm->qps_lock);
1723*4882a593Smuzhiyun 
1724*4882a593Smuzhiyun 	if (!qm_qp_avail_state(qm, qp, QP_CLOSE)) {
1725*4882a593Smuzhiyun 		up_write(&qm->qps_lock);
1726*4882a593Smuzhiyun 		return;
1727*4882a593Smuzhiyun 	}
1728*4882a593Smuzhiyun 
1729*4882a593Smuzhiyun 	qm->qp_in_used--;
1730*4882a593Smuzhiyun 	idr_remove(&qm->qp_idr, qp->qp_id);
1731*4882a593Smuzhiyun 
1732*4882a593Smuzhiyun 	up_write(&qm->qps_lock);
1733*4882a593Smuzhiyun }
1734*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(hisi_qm_release_qp);
1735*4882a593Smuzhiyun 
1736*4882a593Smuzhiyun static int qm_qp_ctx_cfg(struct hisi_qp *qp, int qp_id, u32 pasid)
1737*4882a593Smuzhiyun {
1738*4882a593Smuzhiyun 	struct hisi_qm *qm = qp->qm;
1739*4882a593Smuzhiyun 	struct device *dev = &qm->pdev->dev;
1740*4882a593Smuzhiyun 	enum qm_hw_ver ver = qm->ver;
1741*4882a593Smuzhiyun 	struct qm_sqc *sqc;
1742*4882a593Smuzhiyun 	struct qm_cqc *cqc;
1743*4882a593Smuzhiyun 	dma_addr_t sqc_dma;
1744*4882a593Smuzhiyun 	dma_addr_t cqc_dma;
1745*4882a593Smuzhiyun 	int ret;
1746*4882a593Smuzhiyun 
1747*4882a593Smuzhiyun 	qm_init_qp_status(qp);
1748*4882a593Smuzhiyun 
1749*4882a593Smuzhiyun 	sqc = kzalloc(sizeof(struct qm_sqc), GFP_KERNEL);
1750*4882a593Smuzhiyun 	if (!sqc)
1751*4882a593Smuzhiyun 		return -ENOMEM;
1752*4882a593Smuzhiyun 	sqc_dma = dma_map_single(dev, sqc, sizeof(struct qm_sqc),
1753*4882a593Smuzhiyun 				 DMA_TO_DEVICE);
1754*4882a593Smuzhiyun 	if (dma_mapping_error(dev, sqc_dma)) {
1755*4882a593Smuzhiyun 		kfree(sqc);
1756*4882a593Smuzhiyun 		return -ENOMEM;
1757*4882a593Smuzhiyun 	}
1758*4882a593Smuzhiyun 
1759*4882a593Smuzhiyun 	INIT_QC_COMMON(sqc, qp->sqe_dma, pasid);
1760*4882a593Smuzhiyun 	if (ver == QM_HW_V1) {
1761*4882a593Smuzhiyun 		sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V1(0, 0, 0, qm->sqe_size));
1762*4882a593Smuzhiyun 		sqc->w8 = cpu_to_le16(QM_Q_DEPTH - 1);
1763*4882a593Smuzhiyun 	} else {
1764*4882a593Smuzhiyun 		sqc->dw3 = cpu_to_le32(QM_MK_SQC_DW3_V2(qm->sqe_size));
1765*4882a593Smuzhiyun 		sqc->w8 = 0; /* rand_qc */
1766*4882a593Smuzhiyun 	}
1767*4882a593Smuzhiyun 	sqc->cq_num = cpu_to_le16(qp_id);
1768*4882a593Smuzhiyun 	sqc->w13 = cpu_to_le16(QM_MK_SQC_W13(0, 1, qp->alg_type));
1769*4882a593Smuzhiyun 
1770*4882a593Smuzhiyun 	ret = qm_mb(qm, QM_MB_CMD_SQC, sqc_dma, qp_id, 0);
1771*4882a593Smuzhiyun 	dma_unmap_single(dev, sqc_dma, sizeof(struct qm_sqc), DMA_TO_DEVICE);
1772*4882a593Smuzhiyun 	kfree(sqc);
1773*4882a593Smuzhiyun 	if (ret)
1774*4882a593Smuzhiyun 		return ret;
1775*4882a593Smuzhiyun 
1776*4882a593Smuzhiyun 	cqc = kzalloc(sizeof(struct qm_cqc), GFP_KERNEL);
1777*4882a593Smuzhiyun 	if (!cqc)
1778*4882a593Smuzhiyun 		return -ENOMEM;
1779*4882a593Smuzhiyun 	cqc_dma = dma_map_single(dev, cqc, sizeof(struct qm_cqc),
1780*4882a593Smuzhiyun 				 DMA_TO_DEVICE);
1781*4882a593Smuzhiyun 	if (dma_mapping_error(dev, cqc_dma)) {
1782*4882a593Smuzhiyun 		kfree(cqc);
1783*4882a593Smuzhiyun 		return -ENOMEM;
1784*4882a593Smuzhiyun 	}
1785*4882a593Smuzhiyun 
1786*4882a593Smuzhiyun 	INIT_QC_COMMON(cqc, qp->cqe_dma, pasid);
1787*4882a593Smuzhiyun 	if (ver == QM_HW_V1) {
1788*4882a593Smuzhiyun 		cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V1(0, 0, 0, 4));
1789*4882a593Smuzhiyun 		cqc->w8 = cpu_to_le16(QM_Q_DEPTH - 1);
1790*4882a593Smuzhiyun 	} else {
1791*4882a593Smuzhiyun 		cqc->dw3 = cpu_to_le32(QM_MK_CQC_DW3_V2(4));
1792*4882a593Smuzhiyun 		cqc->w8 = 0;
1793*4882a593Smuzhiyun 	}
1794*4882a593Smuzhiyun 	cqc->dw6 = cpu_to_le32(1 << QM_CQ_PHASE_SHIFT | 1 << QM_CQ_FLAG_SHIFT);
1795*4882a593Smuzhiyun 
1796*4882a593Smuzhiyun 	ret = qm_mb(qm, QM_MB_CMD_CQC, cqc_dma, qp_id, 0);
1797*4882a593Smuzhiyun 	dma_unmap_single(dev, cqc_dma, sizeof(struct qm_cqc), DMA_TO_DEVICE);
1798*4882a593Smuzhiyun 	kfree(cqc);
1799*4882a593Smuzhiyun 
1800*4882a593Smuzhiyun 	return ret;
1801*4882a593Smuzhiyun }
1802*4882a593Smuzhiyun 
1803*4882a593Smuzhiyun static int qm_start_qp_nolock(struct hisi_qp *qp, unsigned long arg)
1804*4882a593Smuzhiyun {
1805*4882a593Smuzhiyun 	struct hisi_qm *qm = qp->qm;
1806*4882a593Smuzhiyun 	struct device *dev = &qm->pdev->dev;
1807*4882a593Smuzhiyun 	int qp_id = qp->qp_id;
1808*4882a593Smuzhiyun 	u32 pasid = arg;
1809*4882a593Smuzhiyun 	int ret;
1810*4882a593Smuzhiyun 
1811*4882a593Smuzhiyun 	if (!qm_qp_avail_state(qm, qp, QP_START))
1812*4882a593Smuzhiyun 		return -EPERM;
1813*4882a593Smuzhiyun 
1814*4882a593Smuzhiyun 	ret = qm_qp_ctx_cfg(qp, qp_id, pasid);
1815*4882a593Smuzhiyun 	if (ret)
1816*4882a593Smuzhiyun 		return ret;
1817*4882a593Smuzhiyun 
1818*4882a593Smuzhiyun 	atomic_set(&qp->qp_status.flags, QP_START);
1819*4882a593Smuzhiyun 	dev_dbg(dev, "queue %d started\n", qp_id);
1820*4882a593Smuzhiyun 
1821*4882a593Smuzhiyun 	return 0;
1822*4882a593Smuzhiyun }
1823*4882a593Smuzhiyun 
1824*4882a593Smuzhiyun /**
1825*4882a593Smuzhiyun  * hisi_qm_start_qp() - Start a qp into running.
1826*4882a593Smuzhiyun  * @qp: The qp we want to start to run.
1827*4882a593Smuzhiyun  * @arg: Accelerator specific argument.
1828*4882a593Smuzhiyun  *
1829*4882a593Smuzhiyun  * After this function, the qp can receive requests from the user. Return 0
1830*4882a593Smuzhiyun  * on success, or -EBUSY on failure.
1831*4882a593Smuzhiyun  */
1832*4882a593Smuzhiyun int hisi_qm_start_qp(struct hisi_qp *qp, unsigned long arg)
1833*4882a593Smuzhiyun {
1834*4882a593Smuzhiyun 	struct hisi_qm *qm = qp->qm;
1835*4882a593Smuzhiyun 	int ret;
1836*4882a593Smuzhiyun 
1837*4882a593Smuzhiyun 	down_write(&qm->qps_lock);
1838*4882a593Smuzhiyun 	ret = qm_start_qp_nolock(qp, arg);
1839*4882a593Smuzhiyun 	up_write(&qm->qps_lock);
1840*4882a593Smuzhiyun 
1841*4882a593Smuzhiyun 	return ret;
1842*4882a593Smuzhiyun }
1843*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(hisi_qm_start_qp);
1844*4882a593Smuzhiyun 
1845*4882a593Smuzhiyun /**
1846*4882a593Smuzhiyun  * Determine whether the queue has drained by comparing the tail pointers
1847*4882a593Smuzhiyun  * of the sq and cq.
1848*4882a593Smuzhiyun  */
1849*4882a593Smuzhiyun static int qm_drain_qp(struct hisi_qp *qp)
1850*4882a593Smuzhiyun {
1851*4882a593Smuzhiyun 	size_t size = sizeof(struct qm_sqc) + sizeof(struct qm_cqc);
1852*4882a593Smuzhiyun 	struct hisi_qm *qm = qp->qm;
1853*4882a593Smuzhiyun 	struct device *dev = &qm->pdev->dev;
1854*4882a593Smuzhiyun 	struct qm_sqc *sqc;
1855*4882a593Smuzhiyun 	struct qm_cqc *cqc;
1856*4882a593Smuzhiyun 	dma_addr_t dma_addr;
1857*4882a593Smuzhiyun 	int ret = 0, i = 0;
1858*4882a593Smuzhiyun 	void *addr;
1859*4882a593Smuzhiyun 
1860*4882a593Smuzhiyun 	/*
1861*4882a593Smuzhiyun 	 * No need to check whether an ECC multi-bit error has occurred,
1862*4882a593Smuzhiyun 	 * because the master OOO will be blocked.
1863*4882a593Smuzhiyun 	 */
1864*4882a593Smuzhiyun 	if (qm->err_status.is_qm_ecc_mbit || qm->err_status.is_dev_ecc_mbit)
1865*4882a593Smuzhiyun 		return 0;
1866*4882a593Smuzhiyun 
1867*4882a593Smuzhiyun 	addr = qm_ctx_alloc(qm, size, &dma_addr);
1868*4882a593Smuzhiyun 	if (IS_ERR(addr)) {
1869*4882a593Smuzhiyun 		dev_err(dev, "Failed to alloc ctx for sqc and cqc!\n");
1870*4882a593Smuzhiyun 		return -ENOMEM;
1871*4882a593Smuzhiyun 	}
1872*4882a593Smuzhiyun 
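	/* poll the sq/cq tails until they match or MAX_WAIT_COUNTS is hit */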
1873*4882a593Smuzhiyun 	while (++i) {
1874*4882a593Smuzhiyun 		ret = qm_dump_sqc_raw(qm, dma_addr, qp->qp_id);
1875*4882a593Smuzhiyun 		if (ret) {
1876*4882a593Smuzhiyun 			dev_err_ratelimited(dev, "Failed to dump sqc!\n");
1877*4882a593Smuzhiyun 			break;
1878*4882a593Smuzhiyun 		}
1879*4882a593Smuzhiyun 		sqc = addr;
1880*4882a593Smuzhiyun 
1881*4882a593Smuzhiyun 		ret = qm_dump_cqc_raw(qm, (dma_addr + sizeof(struct qm_sqc)),
1882*4882a593Smuzhiyun 				      qp->qp_id);
1883*4882a593Smuzhiyun 		if (ret) {
1884*4882a593Smuzhiyun 			dev_err_ratelimited(dev, "Failed to dump cqc!\n");
1885*4882a593Smuzhiyun 			break;
1886*4882a593Smuzhiyun 		}
1887*4882a593Smuzhiyun 		cqc = addr + sizeof(struct qm_sqc);
1888*4882a593Smuzhiyun 
1889*4882a593Smuzhiyun 		if ((sqc->tail == cqc->tail) &&
1890*4882a593Smuzhiyun 		    (QM_SQ_TAIL_IDX(sqc) == QM_CQ_TAIL_IDX(cqc)))
1891*4882a593Smuzhiyun 			break;
1892*4882a593Smuzhiyun 
1893*4882a593Smuzhiyun 		if (i == MAX_WAIT_COUNTS) {
1894*4882a593Smuzhiyun 			dev_err(dev, "Fail to empty queue %u!\n", qp->qp_id);
1895*4882a593Smuzhiyun 			ret = -EBUSY;
1896*4882a593Smuzhiyun 			break;
1897*4882a593Smuzhiyun 		}
1898*4882a593Smuzhiyun 
1899*4882a593Smuzhiyun 		usleep_range(WAIT_PERIOD_US_MIN, WAIT_PERIOD_US_MAX);
1900*4882a593Smuzhiyun 	}
1901*4882a593Smuzhiyun 
1902*4882a593Smuzhiyun 	qm_ctx_free(qm, size, addr, &dma_addr);
1903*4882a593Smuzhiyun 
1904*4882a593Smuzhiyun 	return ret;
1905*4882a593Smuzhiyun }
1906*4882a593Smuzhiyun 
1907*4882a593Smuzhiyun static int qm_stop_qp_nolock(struct hisi_qp *qp)
1908*4882a593Smuzhiyun {
1909*4882a593Smuzhiyun 	struct device *dev = &qp->qm->pdev->dev;
1910*4882a593Smuzhiyun 	int ret;
1911*4882a593Smuzhiyun 
1912*4882a593Smuzhiyun 	/*
1913*4882a593Smuzhiyun 	 * It is allowed to stop and release a qp during reset. If the qp is
1914*4882a593Smuzhiyun 	 * stopped by the reset but still needs to be released afterwards,
1915*4882a593Smuzhiyun 	 * the is_resetting flag should be cleared so that this qp will not
1916*4882a593Smuzhiyun 	 * be restarted after the reset.
1917*4882a593Smuzhiyun 	 */
1918*4882a593Smuzhiyun 	if (atomic_read(&qp->qp_status.flags) == QP_STOP) {
1919*4882a593Smuzhiyun 		qp->is_resetting = false;
1920*4882a593Smuzhiyun 		return 0;
1921*4882a593Smuzhiyun 	}
1922*4882a593Smuzhiyun 
1923*4882a593Smuzhiyun 	if (!qm_qp_avail_state(qp->qm, qp, QP_STOP))
1924*4882a593Smuzhiyun 		return -EPERM;
1925*4882a593Smuzhiyun 
1926*4882a593Smuzhiyun 	atomic_set(&qp->qp_status.flags, QP_STOP);
1927*4882a593Smuzhiyun 
1928*4882a593Smuzhiyun 	ret = qm_drain_qp(qp);
1929*4882a593Smuzhiyun 	if (ret)
1930*4882a593Smuzhiyun 		dev_err(dev, "Failed to drain out data for stopping!\n");
1931*4882a593Smuzhiyun 
1932*4882a593Smuzhiyun 	if (qp->qm->wq)
1933*4882a593Smuzhiyun 		flush_workqueue(qp->qm->wq);
1934*4882a593Smuzhiyun 	else
1935*4882a593Smuzhiyun 		flush_work(&qp->qm->work);
1936*4882a593Smuzhiyun 
1937*4882a593Smuzhiyun 	dev_dbg(dev, "stop queue %u!", qp->qp_id);
1938*4882a593Smuzhiyun 
1939*4882a593Smuzhiyun 	return 0;
1940*4882a593Smuzhiyun }
1941*4882a593Smuzhiyun 
1942*4882a593Smuzhiyun /**
1943*4882a593Smuzhiyun  * hisi_qm_stop_qp() - Stop a qp in qm.
1944*4882a593Smuzhiyun  * @qp: The qp we want to stop.
1945*4882a593Smuzhiyun  *
1946*4882a593Smuzhiyun  * This function is the reverse of hisi_qm_start_qp(). Return 0 if successful.
1947*4882a593Smuzhiyun  */
1948*4882a593Smuzhiyun int hisi_qm_stop_qp(struct hisi_qp *qp)
1949*4882a593Smuzhiyun {
1950*4882a593Smuzhiyun 	int ret;
1951*4882a593Smuzhiyun 
1952*4882a593Smuzhiyun 	down_write(&qp->qm->qps_lock);
1953*4882a593Smuzhiyun 	ret = qm_stop_qp_nolock(qp);
1954*4882a593Smuzhiyun 	up_write(&qp->qm->qps_lock);
1955*4882a593Smuzhiyun 
1956*4882a593Smuzhiyun 	return ret;
1957*4882a593Smuzhiyun }
1958*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(hisi_qm_stop_qp);
1959*4882a593Smuzhiyun 
1960*4882a593Smuzhiyun /**
1961*4882a593Smuzhiyun  * hisi_qp_send() - Queue up a task in the hardware queue.
1962*4882a593Smuzhiyun  * @qp: The qp in which to put the message.
1963*4882a593Smuzhiyun  * @msg: The message.
1964*4882a593Smuzhiyun  *
1965*4882a593Smuzhiyun  * This function will return -EBUSY if qp is currently full, and -EAGAIN
1966*4882a593Smuzhiyun  * if qp related qm is resetting.
1967*4882a593Smuzhiyun  *
1968*4882a593Smuzhiyun  * Note: This function may run concurrently with qm_irq_thread and ACC reset.
1969*4882a593Smuzhiyun  *       It has no race with qm_irq_thread. However, an ACC reset may happen
1970*4882a593Smuzhiyun  *       during hisi_qp_send; for performance we hold no lock here. This can
1971*4882a593Smuzhiyun  *       make the current qm_db fail, or the sent sqe may never be received.
1972*4882a593Smuzhiyun  *       The QM sync/async receive functions should handle the error sqe, and
1973*4882a593Smuzhiyun  *       the ACC reset done function should clear used sqes to 0.
1974*4882a593Smuzhiyun  */
1975*4882a593Smuzhiyun int hisi_qp_send(struct hisi_qp *qp, const void *msg)
1976*4882a593Smuzhiyun {
1977*4882a593Smuzhiyun 	struct hisi_qp_status *qp_status = &qp->qp_status;
1978*4882a593Smuzhiyun 	u16 sq_tail = qp_status->sq_tail;
1979*4882a593Smuzhiyun 	u16 sq_tail_next = (sq_tail + 1) % QM_Q_DEPTH;
1980*4882a593Smuzhiyun 	void *sqe = qm_get_avail_sqe(qp);
1981*4882a593Smuzhiyun 
1982*4882a593Smuzhiyun 	if (unlikely(atomic_read(&qp->qp_status.flags) == QP_STOP ||
1983*4882a593Smuzhiyun 		     atomic_read(&qp->qm->status.flags) == QM_STOP ||
1984*4882a593Smuzhiyun 		     qp->is_resetting)) {
1985*4882a593Smuzhiyun 		dev_info_ratelimited(&qp->qm->pdev->dev, "QP is stopped or resetting\n");
1986*4882a593Smuzhiyun 		return -EAGAIN;
1987*4882a593Smuzhiyun 	}
1988*4882a593Smuzhiyun 
1989*4882a593Smuzhiyun 	if (!sqe)
1990*4882a593Smuzhiyun 		return -EBUSY;
1991*4882a593Smuzhiyun 
1992*4882a593Smuzhiyun 	memcpy(sqe, msg, qp->qm->sqe_size);
1993*4882a593Smuzhiyun 
1994*4882a593Smuzhiyun 	qm_db(qp->qm, qp->qp_id, QM_DOORBELL_CMD_SQ, sq_tail_next, 0);
1995*4882a593Smuzhiyun 	atomic_inc(&qp->qp_status.used);
1996*4882a593Smuzhiyun 	qp_status->sq_tail = sq_tail_next;
1997*4882a593Smuzhiyun 
1998*4882a593Smuzhiyun 	return 0;
1999*4882a593Smuzhiyun }
2000*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(hisi_qp_send);
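
/*
 * Illustrative sketch (not part of the driver): how an accelerator driver
 * built on this QM would typically drive a queue pair end to end. The
 * alg_type value, the pasid and the message layout are device specific
 * and are only assumed here for the example.
 */
static int __maybe_unused qm_qp_usage_example(struct hisi_qm *qm,
					      const void *msg)
{
	struct hisi_qp *qp;
	int ret;

	qp = hisi_qm_create_qp(qm, 0 /* device specific alg_type */);
	if (IS_ERR(qp))
		return PTR_ERR(qp);

	ret = hisi_qm_start_qp(qp, 0 /* pasid; 0 when SVA is not used */);
	if (ret)
		goto out_release;

	ret = hisi_qp_send(qp, msg);	/* -EBUSY if the sq is full */

	hisi_qm_stop_qp(qp);
out_release:
	hisi_qm_release_qp(qp);
	return ret;
}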
2001*4882a593Smuzhiyun 
2002*4882a593Smuzhiyun static void hisi_qm_cache_wb(struct hisi_qm *qm)
2003*4882a593Smuzhiyun {
2004*4882a593Smuzhiyun 	unsigned int val;
2005*4882a593Smuzhiyun 
2006*4882a593Smuzhiyun 	if (qm->ver == QM_HW_V1)
2007*4882a593Smuzhiyun 		return;
2008*4882a593Smuzhiyun 
2009*4882a593Smuzhiyun 	writel(0x1, qm->io_base + QM_CACHE_WB_START);
2010*4882a593Smuzhiyun 	if (readl_relaxed_poll_timeout(qm->io_base + QM_CACHE_WB_DONE,
2011*4882a593Smuzhiyun 					    val, val & BIT(0), 10, 1000))
2012*4882a593Smuzhiyun 		dev_err(&qm->pdev->dev, "QM writeback sqc cache fail!\n");
2013*4882a593Smuzhiyun }
2014*4882a593Smuzhiyun 
2015*4882a593Smuzhiyun static void qm_qp_event_notifier(struct hisi_qp *qp)
2016*4882a593Smuzhiyun {
2017*4882a593Smuzhiyun 	wake_up_interruptible(&qp->uacce_q->wait);
2018*4882a593Smuzhiyun }
2019*4882a593Smuzhiyun 
2020*4882a593Smuzhiyun static int hisi_qm_get_available_instances(struct uacce_device *uacce)
2021*4882a593Smuzhiyun {
2022*4882a593Smuzhiyun 	return hisi_qm_get_free_qp_num(uacce->priv);
2023*4882a593Smuzhiyun }
2024*4882a593Smuzhiyun 
2025*4882a593Smuzhiyun static int hisi_qm_uacce_get_queue(struct uacce_device *uacce,
2026*4882a593Smuzhiyun 				   unsigned long arg,
2027*4882a593Smuzhiyun 				   struct uacce_queue *q)
2028*4882a593Smuzhiyun {
2029*4882a593Smuzhiyun 	struct hisi_qm *qm = uacce->priv;
2030*4882a593Smuzhiyun 	struct hisi_qp *qp;
2031*4882a593Smuzhiyun 	u8 alg_type = 0;
2032*4882a593Smuzhiyun 
2033*4882a593Smuzhiyun 	qp = hisi_qm_create_qp(qm, alg_type);
2034*4882a593Smuzhiyun 	if (IS_ERR(qp))
2035*4882a593Smuzhiyun 		return PTR_ERR(qp);
2036*4882a593Smuzhiyun 
2037*4882a593Smuzhiyun 	q->priv = qp;
2038*4882a593Smuzhiyun 	q->uacce = uacce;
2039*4882a593Smuzhiyun 	qp->uacce_q = q;
2040*4882a593Smuzhiyun 	qp->event_cb = qm_qp_event_notifier;
2041*4882a593Smuzhiyun 	qp->pasid = arg;
2042*4882a593Smuzhiyun 
2043*4882a593Smuzhiyun 	return 0;
2044*4882a593Smuzhiyun }
2045*4882a593Smuzhiyun 
2046*4882a593Smuzhiyun static void hisi_qm_uacce_put_queue(struct uacce_queue *q)
2047*4882a593Smuzhiyun {
2048*4882a593Smuzhiyun 	struct hisi_qp *qp = q->priv;
2049*4882a593Smuzhiyun 
2050*4882a593Smuzhiyun 	hisi_qm_cache_wb(qp->qm);
2051*4882a593Smuzhiyun 	hisi_qm_release_qp(qp);
2052*4882a593Smuzhiyun }
2053*4882a593Smuzhiyun 
2054*4882a593Smuzhiyun /* map sq/cq/doorbell to user space */
2055*4882a593Smuzhiyun static int hisi_qm_uacce_mmap(struct uacce_queue *q,
2056*4882a593Smuzhiyun 			      struct vm_area_struct *vma,
2057*4882a593Smuzhiyun 			      struct uacce_qfile_region *qfr)
2058*4882a593Smuzhiyun {
2059*4882a593Smuzhiyun 	struct hisi_qp *qp = q->priv;
2060*4882a593Smuzhiyun 	struct hisi_qm *qm = qp->qm;
2061*4882a593Smuzhiyun 	size_t sz = vma->vm_end - vma->vm_start;
2062*4882a593Smuzhiyun 	struct pci_dev *pdev = qm->pdev;
2063*4882a593Smuzhiyun 	struct device *dev = &pdev->dev;
2064*4882a593Smuzhiyun 	unsigned long vm_pgoff;
2065*4882a593Smuzhiyun 	int ret;
2066*4882a593Smuzhiyun 
2067*4882a593Smuzhiyun 	switch (qfr->type) {
2068*4882a593Smuzhiyun 	case UACCE_QFRT_MMIO:
2069*4882a593Smuzhiyun 		if (qm->ver == QM_HW_V1) {
2070*4882a593Smuzhiyun 			if (sz > PAGE_SIZE * QM_DOORBELL_PAGE_NR)
2071*4882a593Smuzhiyun 				return -EINVAL;
2072*4882a593Smuzhiyun 		} else {
2073*4882a593Smuzhiyun 			if (sz > PAGE_SIZE * (QM_DOORBELL_PAGE_NR +
2074*4882a593Smuzhiyun 			    QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE))
2075*4882a593Smuzhiyun 				return -EINVAL;
2076*4882a593Smuzhiyun 		}
2077*4882a593Smuzhiyun 
2078*4882a593Smuzhiyun 		vma->vm_flags |= VM_IO;
2079*4882a593Smuzhiyun 
2080*4882a593Smuzhiyun 		return remap_pfn_range(vma, vma->vm_start,
2081*4882a593Smuzhiyun 				       qm->phys_base >> PAGE_SHIFT,
2082*4882a593Smuzhiyun 				       sz, pgprot_noncached(vma->vm_page_prot));
2083*4882a593Smuzhiyun 	case UACCE_QFRT_DUS:
2084*4882a593Smuzhiyun 		if (sz != qp->qdma.size)
2085*4882a593Smuzhiyun 			return -EINVAL;
2086*4882a593Smuzhiyun 
2087*4882a593Smuzhiyun 		/*
2088*4882a593Smuzhiyun 		 * dma_mmap_coherent() requires vm_pgoff to be 0;
2089*4882a593Smuzhiyun 		 * restore vm_pgoff to its initial value afterwards.
2090*4882a593Smuzhiyun 		 */
2091*4882a593Smuzhiyun 		vm_pgoff = vma->vm_pgoff;
2092*4882a593Smuzhiyun 		vma->vm_pgoff = 0;
2093*4882a593Smuzhiyun 		ret = dma_mmap_coherent(dev, vma, qp->qdma.va,
2094*4882a593Smuzhiyun 					qp->qdma.dma, sz);
2095*4882a593Smuzhiyun 		vma->vm_pgoff = vm_pgoff;
2096*4882a593Smuzhiyun 		return ret;
2097*4882a593Smuzhiyun 
2098*4882a593Smuzhiyun 	default:
2099*4882a593Smuzhiyun 		return -EINVAL;
2100*4882a593Smuzhiyun 	}
2101*4882a593Smuzhiyun }
2102*4882a593Smuzhiyun 
2103*4882a593Smuzhiyun static int hisi_qm_uacce_start_queue(struct uacce_queue *q)
2104*4882a593Smuzhiyun {
2105*4882a593Smuzhiyun 	struct hisi_qp *qp = q->priv;
2106*4882a593Smuzhiyun 
2107*4882a593Smuzhiyun 	return hisi_qm_start_qp(qp, qp->pasid);
2108*4882a593Smuzhiyun }
2109*4882a593Smuzhiyun 
2110*4882a593Smuzhiyun static void hisi_qm_uacce_stop_queue(struct uacce_queue *q)
2111*4882a593Smuzhiyun {
2112*4882a593Smuzhiyun 	hisi_qm_stop_qp(q->priv);
2113*4882a593Smuzhiyun }
2114*4882a593Smuzhiyun 
2115*4882a593Smuzhiyun static int qm_set_sqctype(struct uacce_queue *q, u16 type)
2116*4882a593Smuzhiyun {
2117*4882a593Smuzhiyun 	struct hisi_qm *qm = q->uacce->priv;
2118*4882a593Smuzhiyun 	struct hisi_qp *qp = q->priv;
2119*4882a593Smuzhiyun 
2120*4882a593Smuzhiyun 	down_write(&qm->qps_lock);
2121*4882a593Smuzhiyun 	qp->alg_type = type;
2122*4882a593Smuzhiyun 	up_write(&qm->qps_lock);
2123*4882a593Smuzhiyun 
2124*4882a593Smuzhiyun 	return 0;
2125*4882a593Smuzhiyun }
2126*4882a593Smuzhiyun 
2127*4882a593Smuzhiyun static long hisi_qm_uacce_ioctl(struct uacce_queue *q, unsigned int cmd,
2128*4882a593Smuzhiyun 				unsigned long arg)
2129*4882a593Smuzhiyun {
2130*4882a593Smuzhiyun 	struct hisi_qp *qp = q->priv;
2131*4882a593Smuzhiyun 	struct hisi_qp_ctx qp_ctx;
2132*4882a593Smuzhiyun 
2133*4882a593Smuzhiyun 	if (cmd == UACCE_CMD_QM_SET_QP_CTX) {
2134*4882a593Smuzhiyun 		if (copy_from_user(&qp_ctx, (void __user *)arg,
2135*4882a593Smuzhiyun 				   sizeof(struct hisi_qp_ctx)))
2136*4882a593Smuzhiyun 			return -EFAULT;
2137*4882a593Smuzhiyun 
2138*4882a593Smuzhiyun 		if (qp_ctx.qc_type != 0 && qp_ctx.qc_type != 1)
2139*4882a593Smuzhiyun 			return -EINVAL;
2140*4882a593Smuzhiyun 
2141*4882a593Smuzhiyun 		qm_set_sqctype(q, qp_ctx.qc_type);
2142*4882a593Smuzhiyun 		qp_ctx.id = qp->qp_id;
2143*4882a593Smuzhiyun 
2144*4882a593Smuzhiyun 		if (copy_to_user((void __user *)arg, &qp_ctx,
2145*4882a593Smuzhiyun 				 sizeof(struct hisi_qp_ctx)))
2146*4882a593Smuzhiyun 			return -EFAULT;
2147*4882a593Smuzhiyun 	} else {
2148*4882a593Smuzhiyun 		return -EINVAL;
2149*4882a593Smuzhiyun 	}
2150*4882a593Smuzhiyun 
2151*4882a593Smuzhiyun 	return 0;
2152*4882a593Smuzhiyun }
2153*4882a593Smuzhiyun 
2154*4882a593Smuzhiyun static const struct uacce_ops uacce_qm_ops = {
2155*4882a593Smuzhiyun 	.get_available_instances = hisi_qm_get_available_instances,
2156*4882a593Smuzhiyun 	.get_queue = hisi_qm_uacce_get_queue,
2157*4882a593Smuzhiyun 	.put_queue = hisi_qm_uacce_put_queue,
2158*4882a593Smuzhiyun 	.start_queue = hisi_qm_uacce_start_queue,
2159*4882a593Smuzhiyun 	.stop_queue = hisi_qm_uacce_stop_queue,
2160*4882a593Smuzhiyun 	.mmap = hisi_qm_uacce_mmap,
2161*4882a593Smuzhiyun 	.ioctl = hisi_qm_uacce_ioctl,
2162*4882a593Smuzhiyun };
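
/*
 * Rough user-space view of the uacce interface above (illustrative): open
 * the uacce char device, set the sq type with UACCE_CMD_QM_SET_QP_CTX,
 * mmap() the MMIO and DUS regions, then drive the queue directly.
 */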
2163*4882a593Smuzhiyun 
2164*4882a593Smuzhiyun static int qm_alloc_uacce(struct hisi_qm *qm)
2165*4882a593Smuzhiyun {
2166*4882a593Smuzhiyun 	struct pci_dev *pdev = qm->pdev;
2167*4882a593Smuzhiyun 	struct uacce_device *uacce;
2168*4882a593Smuzhiyun 	unsigned long mmio_page_nr;
2169*4882a593Smuzhiyun 	unsigned long dus_page_nr;
2170*4882a593Smuzhiyun 	struct uacce_interface interface = {
2171*4882a593Smuzhiyun 		.flags = UACCE_DEV_SVA,
2172*4882a593Smuzhiyun 		.ops = &uacce_qm_ops,
2173*4882a593Smuzhiyun 	};
2174*4882a593Smuzhiyun 	int ret;
2175*4882a593Smuzhiyun 
2176*4882a593Smuzhiyun 	ret = strscpy(interface.name, pdev->driver->name,
2177*4882a593Smuzhiyun 		      sizeof(interface.name));
2178*4882a593Smuzhiyun 	if (ret < 0)
2179*4882a593Smuzhiyun 		return -ENAMETOOLONG;
2180*4882a593Smuzhiyun 
2181*4882a593Smuzhiyun 	uacce = uacce_alloc(&pdev->dev, &interface);
2182*4882a593Smuzhiyun 	if (IS_ERR(uacce))
2183*4882a593Smuzhiyun 		return PTR_ERR(uacce);
2184*4882a593Smuzhiyun 
2185*4882a593Smuzhiyun 	if (uacce->flags & UACCE_DEV_SVA) {
2186*4882a593Smuzhiyun 		qm->use_sva = true;
2187*4882a593Smuzhiyun 	} else {
2188*4882a593Smuzhiyun 		/* only consider sva case */
2189*4882a593Smuzhiyun 		uacce_remove(uacce);
2190*4882a593Smuzhiyun 		qm->uacce = NULL;
2191*4882a593Smuzhiyun 		return -EINVAL;
2192*4882a593Smuzhiyun 	}
2193*4882a593Smuzhiyun 
2194*4882a593Smuzhiyun 	uacce->is_vf = pdev->is_virtfn;
2195*4882a593Smuzhiyun 	uacce->priv = qm;
2196*4882a593Smuzhiyun 	uacce->algs = qm->algs;
2197*4882a593Smuzhiyun 
2198*4882a593Smuzhiyun 	if (qm->ver == QM_HW_V1) {
2199*4882a593Smuzhiyun 		mmio_page_nr = QM_DOORBELL_PAGE_NR;
2200*4882a593Smuzhiyun 		uacce->api_ver = HISI_QM_API_VER_BASE;
2201*4882a593Smuzhiyun 	} else {
2202*4882a593Smuzhiyun 		mmio_page_nr = QM_DOORBELL_PAGE_NR +
2203*4882a593Smuzhiyun 			QM_DOORBELL_SQ_CQ_BASE_V2 / PAGE_SIZE;
2204*4882a593Smuzhiyun 		uacce->api_ver = HISI_QM_API_VER2_BASE;
2205*4882a593Smuzhiyun 	}
2206*4882a593Smuzhiyun 
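	/* round the per-queue sqe ring plus cqe ring up to whole pages */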
2207*4882a593Smuzhiyun 	dus_page_nr = (PAGE_SIZE - 1 + qm->sqe_size * QM_Q_DEPTH +
2208*4882a593Smuzhiyun 		       sizeof(struct qm_cqe) * QM_Q_DEPTH) >> PAGE_SHIFT;
2209*4882a593Smuzhiyun 
2210*4882a593Smuzhiyun 	uacce->qf_pg_num[UACCE_QFRT_MMIO] = mmio_page_nr;
2211*4882a593Smuzhiyun 	uacce->qf_pg_num[UACCE_QFRT_DUS]  = dus_page_nr;
2212*4882a593Smuzhiyun 
2213*4882a593Smuzhiyun 	qm->uacce = uacce;
2214*4882a593Smuzhiyun 
2215*4882a593Smuzhiyun 	return 0;
2216*4882a593Smuzhiyun }
2217*4882a593Smuzhiyun 
2218*4882a593Smuzhiyun /**
2219*4882a593Smuzhiyun  * qm_frozen() - Try to freeze the QM to cut off continuous queue requests.
2220*4882a593Smuzhiyun  * If there are users on the QM, return failure without doing anything.
2221*4882a593Smuzhiyun  * @qm: The qm to be frozen.
2222*4882a593Smuzhiyun  *
2223*4882a593Smuzhiyun  * This function freezes the QM, after which SRIOV can be disabled.
2224*4882a593Smuzhiyun  */
2225*4882a593Smuzhiyun static int qm_frozen(struct hisi_qm *qm)
2226*4882a593Smuzhiyun {
2227*4882a593Smuzhiyun 	down_write(&qm->qps_lock);
2228*4882a593Smuzhiyun 
2229*4882a593Smuzhiyun 	if (qm->is_frozen) {
2230*4882a593Smuzhiyun 		up_write(&qm->qps_lock);
2231*4882a593Smuzhiyun 		return 0;
2232*4882a593Smuzhiyun 	}
2233*4882a593Smuzhiyun 
2234*4882a593Smuzhiyun 	if (!qm->qp_in_used) {
2235*4882a593Smuzhiyun 		qm->qp_in_used = qm->qp_num;
2236*4882a593Smuzhiyun 		qm->is_frozen = true;
2237*4882a593Smuzhiyun 		up_write(&qm->qps_lock);
2238*4882a593Smuzhiyun 		return 0;
2239*4882a593Smuzhiyun 	}
2240*4882a593Smuzhiyun 
2241*4882a593Smuzhiyun 	up_write(&qm->qps_lock);
2242*4882a593Smuzhiyun 
2243*4882a593Smuzhiyun 	return -EBUSY;
2244*4882a593Smuzhiyun }
2245*4882a593Smuzhiyun 
2246*4882a593Smuzhiyun static int qm_try_frozen_vfs(struct pci_dev *pdev,
2247*4882a593Smuzhiyun 			     struct hisi_qm_list *qm_list)
2248*4882a593Smuzhiyun {
2249*4882a593Smuzhiyun 	struct hisi_qm *qm, *vf_qm;
2250*4882a593Smuzhiyun 	struct pci_dev *dev;
2251*4882a593Smuzhiyun 	int ret = 0;
2252*4882a593Smuzhiyun 
2253*4882a593Smuzhiyun 	if (!qm_list || !pdev)
2254*4882a593Smuzhiyun 		return -EINVAL;
2255*4882a593Smuzhiyun 
2256*4882a593Smuzhiyun 	/* Try to freeze all the VFs before disabling SRIOV */
2257*4882a593Smuzhiyun 	mutex_lock(&qm_list->lock);
2258*4882a593Smuzhiyun 	list_for_each_entry(qm, &qm_list->list, list) {
2259*4882a593Smuzhiyun 		dev = qm->pdev;
2260*4882a593Smuzhiyun 		if (dev == pdev)
2261*4882a593Smuzhiyun 			continue;
2262*4882a593Smuzhiyun 		if (pci_physfn(dev) == pdev) {
2263*4882a593Smuzhiyun 			vf_qm = pci_get_drvdata(dev);
2264*4882a593Smuzhiyun 			ret = qm_frozen(vf_qm);
2265*4882a593Smuzhiyun 			if (ret)
2266*4882a593Smuzhiyun 				goto frozen_fail;
2267*4882a593Smuzhiyun 		}
2268*4882a593Smuzhiyun 	}
2269*4882a593Smuzhiyun 
2270*4882a593Smuzhiyun frozen_fail:
2271*4882a593Smuzhiyun 	mutex_unlock(&qm_list->lock);
2272*4882a593Smuzhiyun 
2273*4882a593Smuzhiyun 	return ret;
2274*4882a593Smuzhiyun }
2275*4882a593Smuzhiyun 
2276*4882a593Smuzhiyun /**
2277*4882a593Smuzhiyun  * hisi_qm_wait_task_finish() - Wait until the task is finished
2278*4882a593Smuzhiyun  * when removing the driver.
2279*4882a593Smuzhiyun  * @qm: The qm needed to wait for the task to finish.
2280*4882a593Smuzhiyun  * @qm_list: The list of all available devices.
2281*4882a593Smuzhiyun  */
2282*4882a593Smuzhiyun void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
2283*4882a593Smuzhiyun {
2284*4882a593Smuzhiyun 	while (qm_frozen(qm) ||
2285*4882a593Smuzhiyun 	       ((qm->fun_type == QM_HW_PF) &&
2286*4882a593Smuzhiyun 	       qm_try_frozen_vfs(qm->pdev, qm_list))) {
2287*4882a593Smuzhiyun 		msleep(WAIT_PERIOD);
2288*4882a593Smuzhiyun 	}
2289*4882a593Smuzhiyun 
2290*4882a593Smuzhiyun 	udelay(REMOVE_WAIT_DELAY);
2291*4882a593Smuzhiyun }
2292*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(hisi_qm_wait_task_finish);
2293*4882a593Smuzhiyun 
2294*4882a593Smuzhiyun /**
2295*4882a593Smuzhiyun  * hisi_qm_get_free_qp_num() - Get the number of free qps in the qm.
2296*4882a593Smuzhiyun  * @qm: The qm to query for free qps.
2297*4882a593Smuzhiyun  *
2298*4882a593Smuzhiyun  * This function returns the number of free qps in the qm.
2299*4882a593Smuzhiyun  */
2300*4882a593Smuzhiyun int hisi_qm_get_free_qp_num(struct hisi_qm *qm)
2301*4882a593Smuzhiyun {
2302*4882a593Smuzhiyun 	int ret;
2303*4882a593Smuzhiyun 
2304*4882a593Smuzhiyun 	down_read(&qm->qps_lock);
2305*4882a593Smuzhiyun 	ret = qm->qp_num - qm->qp_in_used;
2306*4882a593Smuzhiyun 	up_read(&qm->qps_lock);
2307*4882a593Smuzhiyun 
2308*4882a593Smuzhiyun 	return ret;
2309*4882a593Smuzhiyun }
2310*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(hisi_qm_get_free_qp_num);
2311*4882a593Smuzhiyun 
2312*4882a593Smuzhiyun static void hisi_qp_memory_uninit(struct hisi_qm *qm, int num)
2313*4882a593Smuzhiyun {
2314*4882a593Smuzhiyun 	struct device *dev = &qm->pdev->dev;
2315*4882a593Smuzhiyun 	struct qm_dma *qdma;
2316*4882a593Smuzhiyun 	int i;
2317*4882a593Smuzhiyun 
2318*4882a593Smuzhiyun 	for (i = num - 1; i >= 0; i--) {
2319*4882a593Smuzhiyun 		qdma = &qm->qp_array[i].qdma;
2320*4882a593Smuzhiyun 		dma_free_coherent(dev, qdma->size, qdma->va, qdma->dma);
2321*4882a593Smuzhiyun 	}
2322*4882a593Smuzhiyun 
2323*4882a593Smuzhiyun 	kfree(qm->qp_array);
2324*4882a593Smuzhiyun }
2325*4882a593Smuzhiyun 
2326*4882a593Smuzhiyun static int hisi_qp_memory_init(struct hisi_qm *qm, size_t dma_size, int id)
2327*4882a593Smuzhiyun {
2328*4882a593Smuzhiyun 	struct device *dev = &qm->pdev->dev;
2329*4882a593Smuzhiyun 	size_t off = qm->sqe_size * QM_Q_DEPTH;
2330*4882a593Smuzhiyun 	struct hisi_qp *qp;
2331*4882a593Smuzhiyun 
2332*4882a593Smuzhiyun 	qp = &qm->qp_array[id];
2333*4882a593Smuzhiyun 	qp->qdma.va = dma_alloc_coherent(dev, dma_size, &qp->qdma.dma,
2334*4882a593Smuzhiyun 					 GFP_KERNEL);
2335*4882a593Smuzhiyun 	if (!qp->qdma.va)
2336*4882a593Smuzhiyun 		return -ENOMEM;
2337*4882a593Smuzhiyun 
2338*4882a593Smuzhiyun 	qp->sqe = qp->qdma.va;
2339*4882a593Smuzhiyun 	qp->sqe_dma = qp->qdma.dma;
2340*4882a593Smuzhiyun 	qp->cqe = qp->qdma.va + off;
2341*4882a593Smuzhiyun 	qp->cqe_dma = qp->qdma.dma + off;
2342*4882a593Smuzhiyun 	qp->qdma.size = dma_size;
2343*4882a593Smuzhiyun 	qp->qm = qm;
2344*4882a593Smuzhiyun 	qp->qp_id = id;
2345*4882a593Smuzhiyun 
2346*4882a593Smuzhiyun 	return 0;
2347*4882a593Smuzhiyun }
2348*4882a593Smuzhiyun 
2349*4882a593Smuzhiyun static int hisi_qm_memory_init(struct hisi_qm *qm)
2350*4882a593Smuzhiyun {
2351*4882a593Smuzhiyun 	struct device *dev = &qm->pdev->dev;
2352*4882a593Smuzhiyun 	size_t qp_dma_size, off = 0;
2353*4882a593Smuzhiyun 	int i, ret = 0;
2354*4882a593Smuzhiyun 
2355*4882a593Smuzhiyun #define QM_INIT_BUF(qm, type, num) do { \
2356*4882a593Smuzhiyun 	(qm)->type = ((qm)->qdma.va + (off)); \
2357*4882a593Smuzhiyun 	(qm)->type##_dma = (qm)->qdma.dma + (off); \
2358*4882a593Smuzhiyun 	off += QMC_ALIGN(sizeof(struct qm_##type) * (num)); \
2359*4882a593Smuzhiyun } while (0)
2360*4882a593Smuzhiyun 
2361*4882a593Smuzhiyun 	idr_init(&qm->qp_idr);
2362*4882a593Smuzhiyun 	qm->qdma.size = QMC_ALIGN(sizeof(struct qm_eqe) * QM_EQ_DEPTH) +
2363*4882a593Smuzhiyun 			QMC_ALIGN(sizeof(struct qm_aeqe) * QM_Q_DEPTH) +
2364*4882a593Smuzhiyun 			QMC_ALIGN(sizeof(struct qm_sqc) * qm->qp_num) +
2365*4882a593Smuzhiyun 			QMC_ALIGN(sizeof(struct qm_cqc) * qm->qp_num);
2366*4882a593Smuzhiyun 	qm->qdma.va = dma_alloc_coherent(dev, qm->qdma.size, &qm->qdma.dma,
2367*4882a593Smuzhiyun 					 GFP_ATOMIC);
2368*4882a593Smuzhiyun 	dev_dbg(dev, "allocate qm dma buf size=%zx\n", qm->qdma.size);
2369*4882a593Smuzhiyun 	if (!qm->qdma.va)
2370*4882a593Smuzhiyun 		return -ENOMEM;
2371*4882a593Smuzhiyun 
2372*4882a593Smuzhiyun 	QM_INIT_BUF(qm, eqe, QM_EQ_DEPTH);
2373*4882a593Smuzhiyun 	QM_INIT_BUF(qm, aeqe, QM_Q_DEPTH);
2374*4882a593Smuzhiyun 	QM_INIT_BUF(qm, sqc, qm->qp_num);
2375*4882a593Smuzhiyun 	QM_INIT_BUF(qm, cqc, qm->qp_num);
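
	/*
	 * Editor's note: after the four QM_INIT_BUF() invocations above, the
	 * single coherent allocation is laid out as (each slice QMC_ALIGNed):
	 *
	 *   qdma.va: | eqe * QM_EQ_DEPTH | aeqe * QM_Q_DEPTH |
	 *            | sqc * qp_num      | cqc * qp_num      |
	 *
	 * and "off" now equals the qdma.size computed above.
	 */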
2376*4882a593Smuzhiyun 
2377*4882a593Smuzhiyun 	qm->qp_array = kcalloc(qm->qp_num, sizeof(struct hisi_qp), GFP_KERNEL);
2378*4882a593Smuzhiyun 	if (!qm->qp_array) {
2379*4882a593Smuzhiyun 		ret = -ENOMEM;
2380*4882a593Smuzhiyun 		goto err_alloc_qp_array;
2381*4882a593Smuzhiyun 	}
2382*4882a593Smuzhiyun 
2383*4882a593Smuzhiyun 	/* one more page for device or qp statuses */
2384*4882a593Smuzhiyun 	qp_dma_size = qm->sqe_size * QM_Q_DEPTH +
2385*4882a593Smuzhiyun 		      sizeof(struct qm_cqe) * QM_Q_DEPTH;
2386*4882a593Smuzhiyun 	qp_dma_size = PAGE_ALIGN(qp_dma_size);
2387*4882a593Smuzhiyun 	for (i = 0; i < qm->qp_num; i++) {
2388*4882a593Smuzhiyun 		ret = hisi_qp_memory_init(qm, qp_dma_size, i);
2389*4882a593Smuzhiyun 		if (ret)
2390*4882a593Smuzhiyun 			goto err_init_qp_mem;
2391*4882a593Smuzhiyun 
2392*4882a593Smuzhiyun 		dev_dbg(dev, "allocate qp dma buf size=%zx\n", qp_dma_size);
2393*4882a593Smuzhiyun 	}
2394*4882a593Smuzhiyun 
2395*4882a593Smuzhiyun 	return ret;
2396*4882a593Smuzhiyun 
2397*4882a593Smuzhiyun err_init_qp_mem:
2398*4882a593Smuzhiyun 	hisi_qp_memory_uninit(qm, i);
2399*4882a593Smuzhiyun err_alloc_qp_array:
2400*4882a593Smuzhiyun 	dma_free_coherent(dev, qm->qdma.size, qm->qdma.va, qm->qdma.dma);
2401*4882a593Smuzhiyun 
2402*4882a593Smuzhiyun 	return ret;
2403*4882a593Smuzhiyun }
2404*4882a593Smuzhiyun 
2405*4882a593Smuzhiyun static void hisi_qm_pre_init(struct hisi_qm *qm)
2406*4882a593Smuzhiyun {
2407*4882a593Smuzhiyun 	struct pci_dev *pdev = qm->pdev;
2408*4882a593Smuzhiyun 
2409*4882a593Smuzhiyun 	if (qm->ver == QM_HW_V1)
2410*4882a593Smuzhiyun 		qm->ops = &qm_hw_ops_v1;
2411*4882a593Smuzhiyun 	else
2412*4882a593Smuzhiyun 		qm->ops = &qm_hw_ops_v2;
2413*4882a593Smuzhiyun 
2414*4882a593Smuzhiyun 	pci_set_drvdata(pdev, qm);
2415*4882a593Smuzhiyun 	mutex_init(&qm->mailbox_lock);
2416*4882a593Smuzhiyun 	init_rwsem(&qm->qps_lock);
2417*4882a593Smuzhiyun 	qm->qp_in_used = 0;
2418*4882a593Smuzhiyun 	qm->is_frozen = false;
2419*4882a593Smuzhiyun }
2420*4882a593Smuzhiyun 
2421*4882a593Smuzhiyun /**
2422*4882a593Smuzhiyun  * hisi_qm_uninit() - Uninitialize qm.
2423*4882a593Smuzhiyun  * @qm: The qm to be uninitialized.
2424*4882a593Smuzhiyun  *
2425*4882a593Smuzhiyun  * This function uninitializes the qm's related device resources.
2426*4882a593Smuzhiyun  */
2427*4882a593Smuzhiyun void hisi_qm_uninit(struct hisi_qm *qm)
2428*4882a593Smuzhiyun {
2429*4882a593Smuzhiyun 	struct pci_dev *pdev = qm->pdev;
2430*4882a593Smuzhiyun 	struct device *dev = &pdev->dev;
2431*4882a593Smuzhiyun 
2432*4882a593Smuzhiyun 	down_write(&qm->qps_lock);
2433*4882a593Smuzhiyun 
2434*4882a593Smuzhiyun 	if (!qm_avail_state(qm, QM_CLOSE)) {
2435*4882a593Smuzhiyun 		up_write(&qm->qps_lock);
2436*4882a593Smuzhiyun 		return;
2437*4882a593Smuzhiyun 	}
2438*4882a593Smuzhiyun 
2439*4882a593Smuzhiyun 	uacce_remove(qm->uacce);
2440*4882a593Smuzhiyun 	qm->uacce = NULL;
2441*4882a593Smuzhiyun 
2442*4882a593Smuzhiyun 	hisi_qp_memory_uninit(qm, qm->qp_num);
2443*4882a593Smuzhiyun 	idr_destroy(&qm->qp_idr);
2444*4882a593Smuzhiyun 
2445*4882a593Smuzhiyun 	if (qm->qdma.va) {
2446*4882a593Smuzhiyun 		hisi_qm_cache_wb(qm);
2447*4882a593Smuzhiyun 		dma_free_coherent(dev, qm->qdma.size,
2448*4882a593Smuzhiyun 				  qm->qdma.va, qm->qdma.dma);
2449*4882a593Smuzhiyun 		memset(&qm->qdma, 0, sizeof(qm->qdma));
2450*4882a593Smuzhiyun 	}
2451*4882a593Smuzhiyun 
2452*4882a593Smuzhiyun 	qm_irq_unregister(qm);
2453*4882a593Smuzhiyun 	pci_free_irq_vectors(pdev);
2454*4882a593Smuzhiyun 	iounmap(qm->io_base);
2455*4882a593Smuzhiyun 	pci_release_mem_regions(pdev);
2456*4882a593Smuzhiyun 	pci_disable_device(pdev);
2457*4882a593Smuzhiyun 
2458*4882a593Smuzhiyun 	up_write(&qm->qps_lock);
2459*4882a593Smuzhiyun }
2460*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(hisi_qm_uninit);
2461*4882a593Smuzhiyun 
2462*4882a593Smuzhiyun /**
2463*4882a593Smuzhiyun  * hisi_qm_get_vft() - Get vft from a qm.
2464*4882a593Smuzhiyun  * @qm: The qm whose vft we want to get.
2465*4882a593Smuzhiyun  * @base: The base queue number in the vft.
2466*4882a593Smuzhiyun  * @number: The number of queues in the vft.
2467*4882a593Smuzhiyun  *
2468*4882a593Smuzhiyun  * We can allocate multiple queues to a qm by configuring the virtual function
2469*4882a593Smuzhiyun  * table. This function reads that configuration back. Normally, it is called
2470*4882a593Smuzhiyun  * in the VF driver to get the queue information.
2471*4882a593Smuzhiyun  *
2472*4882a593Smuzhiyun  * qm hw v1 does not support this interface.
2473*4882a593Smuzhiyun  */
2474*4882a593Smuzhiyun int hisi_qm_get_vft(struct hisi_qm *qm, u32 *base, u32 *number)
2475*4882a593Smuzhiyun {
2476*4882a593Smuzhiyun 	if (!base || !number)
2477*4882a593Smuzhiyun 		return -EINVAL;
2478*4882a593Smuzhiyun 
2479*4882a593Smuzhiyun 	if (!qm->ops->get_vft) {
2480*4882a593Smuzhiyun 		dev_err(&qm->pdev->dev, "Don't support vft read!\n");
2481*4882a593Smuzhiyun 		return -EINVAL;
2482*4882a593Smuzhiyun 	}
2483*4882a593Smuzhiyun 
2484*4882a593Smuzhiyun 	return qm->ops->get_vft(qm, base, number);
2485*4882a593Smuzhiyun }
2486*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(hisi_qm_get_vft);
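
/*
 * Editor's sketch, not part of the driver: a typical VF-side probe can read
 * back its queue allocation as below; example_vf_read_vft() is hypothetical.
 */
static int example_vf_read_vft(struct hisi_qm *qm)
{
	u32 base, num;
	int ret;

	ret = hisi_qm_get_vft(qm, &base, &num);
	if (ret)
		return ret;

	qm->qp_base = base;
	qm->qp_num = num;

	return 0;
}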
2487*4882a593Smuzhiyun 
2488*4882a593Smuzhiyun /*
2489*4882a593Smuzhiyun  * This function is always called in the PF driver; it is used to assign
2490*4882a593Smuzhiyun  * queues among the PF and VFs.
2491*4882a593Smuzhiyun  *
2492*4882a593Smuzhiyun  * Assign queues A~B to PF: hisi_qm_set_vft(qm, 0, A, B - A + 1)
2493*4882a593Smuzhiyun  * Assign queues A~B to VF: hisi_qm_set_vft(qm, 2, A, B - A + 1)
2494*4882a593Smuzhiyun  * (VF function number 0x2)
2495*4882a593Smuzhiyun  */
2496*4882a593Smuzhiyun static int hisi_qm_set_vft(struct hisi_qm *qm, u32 fun_num, u32 base,
2497*4882a593Smuzhiyun 		    u32 number)
2498*4882a593Smuzhiyun {
2499*4882a593Smuzhiyun 	u32 max_q_num = qm->ctrl_qp_num;
2500*4882a593Smuzhiyun 
2501*4882a593Smuzhiyun 	if (base >= max_q_num || number > max_q_num ||
2502*4882a593Smuzhiyun 	    (base + number) > max_q_num)
2503*4882a593Smuzhiyun 		return -EINVAL;
2504*4882a593Smuzhiyun 
2505*4882a593Smuzhiyun 	return qm_set_sqc_cqc_vft(qm, fun_num, base, number);
2506*4882a593Smuzhiyun }
2507*4882a593Smuzhiyun 
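/*
 * Editor's sketch: concrete calls matching the examples in the comment above
 * hisi_qm_set_vft(); the queue split (PF 0..63, VF 2 64..95) is an assumption
 * for illustration.
 */
static int example_assign_queues(struct hisi_qm *qm)
{
	int ret;

	ret = hisi_qm_set_vft(qm, 0, 0, 64);	/* PF: queues 0..63 */
	if (ret)
		return ret;

	return hisi_qm_set_vft(qm, 2, 64, 32);	/* VF 2: queues 64..95 */
}
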
2508*4882a593Smuzhiyun static void qm_init_eq_aeq_status(struct hisi_qm *qm)
2509*4882a593Smuzhiyun {
2510*4882a593Smuzhiyun 	struct hisi_qm_status *status = &qm->status;
2511*4882a593Smuzhiyun 
2512*4882a593Smuzhiyun 	status->eq_head = 0;
2513*4882a593Smuzhiyun 	status->aeq_head = 0;
2514*4882a593Smuzhiyun 	status->eqc_phase = true;
2515*4882a593Smuzhiyun 	status->aeqc_phase = true;
2516*4882a593Smuzhiyun }
2517*4882a593Smuzhiyun 
2518*4882a593Smuzhiyun static int qm_eq_ctx_cfg(struct hisi_qm *qm)
2519*4882a593Smuzhiyun {
2520*4882a593Smuzhiyun 	struct device *dev = &qm->pdev->dev;
2521*4882a593Smuzhiyun 	struct qm_eqc *eqc;
2522*4882a593Smuzhiyun 	struct qm_aeqc *aeqc;
2523*4882a593Smuzhiyun 	dma_addr_t eqc_dma;
2524*4882a593Smuzhiyun 	dma_addr_t aeqc_dma;
2525*4882a593Smuzhiyun 	int ret;
2526*4882a593Smuzhiyun 
2527*4882a593Smuzhiyun 	qm_init_eq_aeq_status(qm);
2528*4882a593Smuzhiyun 
2529*4882a593Smuzhiyun 	eqc = kzalloc(sizeof(struct qm_eqc), GFP_KERNEL);
2530*4882a593Smuzhiyun 	if (!eqc)
2531*4882a593Smuzhiyun 		return -ENOMEM;
2532*4882a593Smuzhiyun 	eqc_dma = dma_map_single(dev, eqc, sizeof(struct qm_eqc),
2533*4882a593Smuzhiyun 				 DMA_TO_DEVICE);
2534*4882a593Smuzhiyun 	if (dma_mapping_error(dev, eqc_dma)) {
2535*4882a593Smuzhiyun 		kfree(eqc);
2536*4882a593Smuzhiyun 		return -ENOMEM;
2537*4882a593Smuzhiyun 	}
2538*4882a593Smuzhiyun 
2539*4882a593Smuzhiyun 	eqc->base_l = cpu_to_le32(lower_32_bits(qm->eqe_dma));
2540*4882a593Smuzhiyun 	eqc->base_h = cpu_to_le32(upper_32_bits(qm->eqe_dma));
2541*4882a593Smuzhiyun 	if (qm->ver == QM_HW_V1)
2542*4882a593Smuzhiyun 		eqc->dw3 = cpu_to_le32(QM_EQE_AEQE_SIZE);
2543*4882a593Smuzhiyun 	eqc->dw6 = cpu_to_le32((QM_EQ_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT));
2544*4882a593Smuzhiyun 	ret = qm_mb(qm, QM_MB_CMD_EQC, eqc_dma, 0, 0);
2545*4882a593Smuzhiyun 	dma_unmap_single(dev, eqc_dma, sizeof(struct qm_eqc), DMA_TO_DEVICE);
2546*4882a593Smuzhiyun 	kfree(eqc);
2547*4882a593Smuzhiyun 	if (ret)
2548*4882a593Smuzhiyun 		return ret;
2549*4882a593Smuzhiyun 
2550*4882a593Smuzhiyun 	aeqc = kzalloc(sizeof(struct qm_aeqc), GFP_KERNEL);
2551*4882a593Smuzhiyun 	if (!aeqc)
2552*4882a593Smuzhiyun 		return -ENOMEM;
2553*4882a593Smuzhiyun 	aeqc_dma = dma_map_single(dev, aeqc, sizeof(struct qm_aeqc),
2554*4882a593Smuzhiyun 				  DMA_TO_DEVICE);
2555*4882a593Smuzhiyun 	if (dma_mapping_error(dev, aeqc_dma)) {
2556*4882a593Smuzhiyun 		kfree(aeqc);
2557*4882a593Smuzhiyun 		return -ENOMEM;
2558*4882a593Smuzhiyun 	}
2559*4882a593Smuzhiyun 
2560*4882a593Smuzhiyun 	aeqc->base_l = cpu_to_le32(lower_32_bits(qm->aeqe_dma));
2561*4882a593Smuzhiyun 	aeqc->base_h = cpu_to_le32(upper_32_bits(qm->aeqe_dma));
2562*4882a593Smuzhiyun 	aeqc->dw6 = cpu_to_le32((QM_Q_DEPTH - 1) | (1 << QM_EQC_PHASE_SHIFT));
2563*4882a593Smuzhiyun 
2564*4882a593Smuzhiyun 	ret = qm_mb(qm, QM_MB_CMD_AEQC, aeqc_dma, 0, 0);
2565*4882a593Smuzhiyun 	dma_unmap_single(dev, aeqc_dma, sizeof(struct qm_aeqc), DMA_TO_DEVICE);
2566*4882a593Smuzhiyun 	kfree(aeqc);
2567*4882a593Smuzhiyun 
2568*4882a593Smuzhiyun 	return ret;
2569*4882a593Smuzhiyun }
2570*4882a593Smuzhiyun 
2571*4882a593Smuzhiyun static int __hisi_qm_start(struct hisi_qm *qm)
2572*4882a593Smuzhiyun {
2573*4882a593Smuzhiyun 	int ret;
2574*4882a593Smuzhiyun 
2575*4882a593Smuzhiyun 	WARN_ON(!qm->qdma.dma);
2576*4882a593Smuzhiyun 
2577*4882a593Smuzhiyun 	if (qm->fun_type == QM_HW_PF) {
2578*4882a593Smuzhiyun 		ret = qm_dev_mem_reset(qm);
2579*4882a593Smuzhiyun 		if (ret)
2580*4882a593Smuzhiyun 			return ret;
2581*4882a593Smuzhiyun 
2582*4882a593Smuzhiyun 		ret = hisi_qm_set_vft(qm, 0, qm->qp_base, qm->qp_num);
2583*4882a593Smuzhiyun 		if (ret)
2584*4882a593Smuzhiyun 			return ret;
2585*4882a593Smuzhiyun 	}
2586*4882a593Smuzhiyun 
2587*4882a593Smuzhiyun 	ret = qm_eq_ctx_cfg(qm);
2588*4882a593Smuzhiyun 	if (ret)
2589*4882a593Smuzhiyun 		return ret;
2590*4882a593Smuzhiyun 
2591*4882a593Smuzhiyun 	ret = qm_mb(qm, QM_MB_CMD_SQC_BT, qm->sqc_dma, 0, 0);
2592*4882a593Smuzhiyun 	if (ret)
2593*4882a593Smuzhiyun 		return ret;
2594*4882a593Smuzhiyun 
2595*4882a593Smuzhiyun 	ret = qm_mb(qm, QM_MB_CMD_CQC_BT, qm->cqc_dma, 0, 0);
2596*4882a593Smuzhiyun 	if (ret)
2597*4882a593Smuzhiyun 		return ret;
2598*4882a593Smuzhiyun 
2599*4882a593Smuzhiyun 	writel(0x0, qm->io_base + QM_VF_EQ_INT_MASK);
2600*4882a593Smuzhiyun 	writel(0x0, qm->io_base + QM_VF_AEQ_INT_MASK);
2601*4882a593Smuzhiyun 
2602*4882a593Smuzhiyun 	return 0;
2603*4882a593Smuzhiyun }
2604*4882a593Smuzhiyun 
2605*4882a593Smuzhiyun /**
2606*4882a593Smuzhiyun  * hisi_qm_start() - start qm
2607*4882a593Smuzhiyun  * @qm: The qm to be started.
2608*4882a593Smuzhiyun  *
2609*4882a593Smuzhiyun  * This function starts a qm, after which qps can be allocated from it.
2610*4882a593Smuzhiyun  */
2611*4882a593Smuzhiyun int hisi_qm_start(struct hisi_qm *qm)
2612*4882a593Smuzhiyun {
2613*4882a593Smuzhiyun 	struct device *dev = &qm->pdev->dev;
2614*4882a593Smuzhiyun 	int ret = 0;
2615*4882a593Smuzhiyun 
2616*4882a593Smuzhiyun 	down_write(&qm->qps_lock);
2617*4882a593Smuzhiyun 
2618*4882a593Smuzhiyun 	if (!qm_avail_state(qm, QM_START)) {
2619*4882a593Smuzhiyun 		up_write(&qm->qps_lock);
2620*4882a593Smuzhiyun 		return -EPERM;
2621*4882a593Smuzhiyun 	}
2622*4882a593Smuzhiyun 
2623*4882a593Smuzhiyun 	dev_dbg(dev, "qm start with %d queue pairs\n", qm->qp_num);
2624*4882a593Smuzhiyun 
2625*4882a593Smuzhiyun 	if (!qm->qp_num) {
2626*4882a593Smuzhiyun 		dev_err(dev, "qp_num should not be 0\n");
2627*4882a593Smuzhiyun 		ret = -EINVAL;
2628*4882a593Smuzhiyun 		goto err_unlock;
2629*4882a593Smuzhiyun 	}
2630*4882a593Smuzhiyun 
2631*4882a593Smuzhiyun 	ret = __hisi_qm_start(qm);
2632*4882a593Smuzhiyun 	if (!ret)
2633*4882a593Smuzhiyun 		atomic_set(&qm->status.flags, QM_START);
2634*4882a593Smuzhiyun 
2635*4882a593Smuzhiyun err_unlock:
2636*4882a593Smuzhiyun 	up_write(&qm->qps_lock);
2637*4882a593Smuzhiyun 	return ret;
2638*4882a593Smuzhiyun }
2639*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(hisi_qm_start);
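
/*
 * Editor's sketch, not part of the driver: the usual PF probe tail, with
 * example_probe_tail() being a hypothetical name. QM_NORMAL is the plain
 * stop reason from enum qm_stop_reason.
 */
static int example_probe_tail(struct hisi_qm *qm)
{
	int ret;

	ret = hisi_qm_start(qm);	/* must precede any qp allocation */
	if (ret)
		return ret;

	ret = hisi_qm_debug_init(qm);
	if (ret)
		hisi_qm_stop(qm, QM_NORMAL);

	return ret;
}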
2640*4882a593Smuzhiyun 
2641*4882a593Smuzhiyun static int qm_restart(struct hisi_qm *qm)
2642*4882a593Smuzhiyun {
2643*4882a593Smuzhiyun 	struct device *dev = &qm->pdev->dev;
2644*4882a593Smuzhiyun 	struct hisi_qp *qp;
2645*4882a593Smuzhiyun 	int ret, i;
2646*4882a593Smuzhiyun 
2647*4882a593Smuzhiyun 	ret = hisi_qm_start(qm);
2648*4882a593Smuzhiyun 	if (ret < 0)
2649*4882a593Smuzhiyun 		return ret;
2650*4882a593Smuzhiyun 
2651*4882a593Smuzhiyun 	down_write(&qm->qps_lock);
2652*4882a593Smuzhiyun 	for (i = 0; i < qm->qp_num; i++) {
2653*4882a593Smuzhiyun 		qp = &qm->qp_array[i];
2654*4882a593Smuzhiyun 		if (atomic_read(&qp->qp_status.flags) == QP_STOP &&
2655*4882a593Smuzhiyun 		    qp->is_resetting == true) {
2656*4882a593Smuzhiyun 			ret = qm_start_qp_nolock(qp, 0);
2657*4882a593Smuzhiyun 			if (ret < 0) {
2658*4882a593Smuzhiyun 				dev_err(dev, "Failed to start qp%d!\n", i);
2659*4882a593Smuzhiyun 
2660*4882a593Smuzhiyun 				up_write(&qm->qps_lock);
2661*4882a593Smuzhiyun 				return ret;
2662*4882a593Smuzhiyun 			}
2663*4882a593Smuzhiyun 			qp->is_resetting = false;
2664*4882a593Smuzhiyun 		}
2665*4882a593Smuzhiyun 	}
2666*4882a593Smuzhiyun 	up_write(&qm->qps_lock);
2667*4882a593Smuzhiyun 
2668*4882a593Smuzhiyun 	return 0;
2669*4882a593Smuzhiyun }
2670*4882a593Smuzhiyun 
2671*4882a593Smuzhiyun /* Stop started qps in reset flow */
2672*4882a593Smuzhiyun static int qm_stop_started_qp(struct hisi_qm *qm)
2673*4882a593Smuzhiyun {
2674*4882a593Smuzhiyun 	struct device *dev = &qm->pdev->dev;
2675*4882a593Smuzhiyun 	struct hisi_qp *qp;
2676*4882a593Smuzhiyun 	int i, ret;
2677*4882a593Smuzhiyun 
2678*4882a593Smuzhiyun 	for (i = 0; i < qm->qp_num; i++) {
2679*4882a593Smuzhiyun 		qp = &qm->qp_array[i];
2680*4882a593Smuzhiyun 		if (qp && atomic_read(&qp->qp_status.flags) == QP_START) {
2681*4882a593Smuzhiyun 			qp->is_resetting = true;
2682*4882a593Smuzhiyun 			ret = qm_stop_qp_nolock(qp);
2683*4882a593Smuzhiyun 			if (ret < 0) {
2684*4882a593Smuzhiyun 				dev_err(dev, "Failed to stop qp%d!\n", i);
2685*4882a593Smuzhiyun 				return ret;
2686*4882a593Smuzhiyun 			}
2687*4882a593Smuzhiyun 		}
2688*4882a593Smuzhiyun 	}
2689*4882a593Smuzhiyun 
2690*4882a593Smuzhiyun 	return 0;
2691*4882a593Smuzhiyun }
2692*4882a593Smuzhiyun 
2693*4882a593Smuzhiyun /*
2694*4882a593Smuzhiyun  * This function clears the memory of all queues in a qm. The accelerator
2695*4882a593Smuzhiyun  * reset flow can use it to clear the queues.
2696*4882a593Smuzhiyun  */
2697*4882a593Smuzhiyun static void qm_clear_queues(struct hisi_qm *qm)
2698*4882a593Smuzhiyun {
2699*4882a593Smuzhiyun 	struct hisi_qp *qp;
2700*4882a593Smuzhiyun 	int i;
2701*4882a593Smuzhiyun 
2702*4882a593Smuzhiyun 	for (i = 0; i < qm->qp_num; i++) {
2703*4882a593Smuzhiyun 		qp = &qm->qp_array[i];
2704*4882a593Smuzhiyun 		if (qp->is_resetting)
2705*4882a593Smuzhiyun 			memset(qp->qdma.va, 0, qp->qdma.size);
2706*4882a593Smuzhiyun 	}
2707*4882a593Smuzhiyun 
2708*4882a593Smuzhiyun 	memset(qm->qdma.va, 0, qm->qdma.size);
2709*4882a593Smuzhiyun }
2710*4882a593Smuzhiyun 
2711*4882a593Smuzhiyun /**
2712*4882a593Smuzhiyun  * hisi_qm_stop() - Stop a qm.
2713*4882a593Smuzhiyun  * @qm: The qm which will be stopped.
2714*4882a593Smuzhiyun  * @r: The reason to stop qm.
2715*4882a593Smuzhiyun  *
2716*4882a593Smuzhiyun  * This function stops the qm and its qps; after that the qm cannot accept
2717*4882a593Smuzhiyun  * requests. Related resources are not released in this state; hisi_qm_start()
2718*4882a593Smuzhiyun  * can be used to start the qm again.
2719*4882a593Smuzhiyun  */
2720*4882a593Smuzhiyun int hisi_qm_stop(struct hisi_qm *qm, enum qm_stop_reason r)
2721*4882a593Smuzhiyun {
2722*4882a593Smuzhiyun 	struct device *dev = &qm->pdev->dev;
2723*4882a593Smuzhiyun 	int ret = 0;
2724*4882a593Smuzhiyun 
2725*4882a593Smuzhiyun 	down_write(&qm->qps_lock);
2726*4882a593Smuzhiyun 
2727*4882a593Smuzhiyun 	qm->status.stop_reason = r;
2728*4882a593Smuzhiyun 	if (!qm_avail_state(qm, QM_STOP)) {
2729*4882a593Smuzhiyun 		ret = -EPERM;
2730*4882a593Smuzhiyun 		goto err_unlock;
2731*4882a593Smuzhiyun 	}
2732*4882a593Smuzhiyun 
2733*4882a593Smuzhiyun 	if (qm->status.stop_reason == QM_SOFT_RESET ||
2734*4882a593Smuzhiyun 	    qm->status.stop_reason == QM_FLR) {
2735*4882a593Smuzhiyun 		ret = qm_stop_started_qp(qm);
2736*4882a593Smuzhiyun 		if (ret < 0) {
2737*4882a593Smuzhiyun 			dev_err(dev, "Failed to stop started qp!\n");
2738*4882a593Smuzhiyun 			goto err_unlock;
2739*4882a593Smuzhiyun 		}
2740*4882a593Smuzhiyun 	}
2741*4882a593Smuzhiyun 
2742*4882a593Smuzhiyun 	/* Mask eq and aeq irq */
2743*4882a593Smuzhiyun 	writel(0x1, qm->io_base + QM_VF_EQ_INT_MASK);
2744*4882a593Smuzhiyun 	writel(0x1, qm->io_base + QM_VF_AEQ_INT_MASK);
2745*4882a593Smuzhiyun 
2746*4882a593Smuzhiyun 	if (qm->fun_type == QM_HW_PF) {
2747*4882a593Smuzhiyun 		ret = hisi_qm_set_vft(qm, 0, 0, 0);
2748*4882a593Smuzhiyun 		if (ret < 0) {
2749*4882a593Smuzhiyun 			dev_err(dev, "Failed to set vft!\n");
2750*4882a593Smuzhiyun 			ret = -EBUSY;
2751*4882a593Smuzhiyun 			goto err_unlock;
2752*4882a593Smuzhiyun 		}
2753*4882a593Smuzhiyun 	}
2754*4882a593Smuzhiyun 
2755*4882a593Smuzhiyun 	qm_clear_queues(qm);
2756*4882a593Smuzhiyun 	atomic_set(&qm->status.flags, QM_STOP);
2757*4882a593Smuzhiyun 
2758*4882a593Smuzhiyun err_unlock:
2759*4882a593Smuzhiyun 	up_write(&qm->qps_lock);
2760*4882a593Smuzhiyun 	return ret;
2761*4882a593Smuzhiyun }
2762*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(hisi_qm_stop);
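
/*
 * Editor's sketch: a hypothetical .remove() tail showing how the wait, stop
 * and uninit calls pair up; example_remove() and the qm_list argument are
 * assumptions.
 */
static void example_remove(struct pci_dev *pdev, struct hisi_qm_list *qm_list)
{
	struct hisi_qm *qm = pci_get_drvdata(pdev);

	hisi_qm_wait_task_finish(qm, qm_list);
	hisi_qm_stop(qm, QM_NORMAL);
	hisi_qm_uninit(qm);
}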
2763*4882a593Smuzhiyun 
2764*4882a593Smuzhiyun static ssize_t qm_status_read(struct file *filp, char __user *buffer,
2765*4882a593Smuzhiyun 			      size_t count, loff_t *pos)
2766*4882a593Smuzhiyun {
2767*4882a593Smuzhiyun 	struct hisi_qm *qm = filp->private_data;
2768*4882a593Smuzhiyun 	char buf[QM_DBG_READ_LEN];
2769*4882a593Smuzhiyun 	int val, len;
2770*4882a593Smuzhiyun 
2771*4882a593Smuzhiyun 	val = atomic_read(&qm->status.flags);
2772*4882a593Smuzhiyun 	len = scnprintf(buf, QM_DBG_READ_LEN, "%s\n", qm_s[val]);
2773*4882a593Smuzhiyun 
2774*4882a593Smuzhiyun 	return simple_read_from_buffer(buffer, count, pos, buf, len);
2775*4882a593Smuzhiyun }
2776*4882a593Smuzhiyun 
2777*4882a593Smuzhiyun static const struct file_operations qm_status_fops = {
2778*4882a593Smuzhiyun 	.owner = THIS_MODULE,
2779*4882a593Smuzhiyun 	.open = simple_open,
2780*4882a593Smuzhiyun 	.read = qm_status_read,
2781*4882a593Smuzhiyun };
2782*4882a593Smuzhiyun 
2783*4882a593Smuzhiyun static int qm_debugfs_atomic64_set(void *data, u64 val)
2784*4882a593Smuzhiyun {
2785*4882a593Smuzhiyun 	if (val)
2786*4882a593Smuzhiyun 		return -EINVAL;
2787*4882a593Smuzhiyun 
2788*4882a593Smuzhiyun 	atomic64_set((atomic64_t *)data, 0);
2789*4882a593Smuzhiyun 
2790*4882a593Smuzhiyun 	return 0;
2791*4882a593Smuzhiyun }
2792*4882a593Smuzhiyun 
2793*4882a593Smuzhiyun static int qm_debugfs_atomic64_get(void *data, u64 *val)
2794*4882a593Smuzhiyun {
2795*4882a593Smuzhiyun 	*val = atomic64_read((atomic64_t *)data);
2796*4882a593Smuzhiyun 
2797*4882a593Smuzhiyun 	return 0;
2798*4882a593Smuzhiyun }
2799*4882a593Smuzhiyun 
2800*4882a593Smuzhiyun DEFINE_DEBUGFS_ATTRIBUTE(qm_atomic64_ops, qm_debugfs_atomic64_get,
2801*4882a593Smuzhiyun 			 qm_debugfs_atomic64_set, "%llu\n");
2802*4882a593Smuzhiyun 
2803*4882a593Smuzhiyun /**
2804*4882a593Smuzhiyun  * hisi_qm_debug_init() - Initialize qm related debugfs files.
2805*4882a593Smuzhiyun  * @qm: The qm for which we want to add debugfs files.
2806*4882a593Smuzhiyun  *
2807*4882a593Smuzhiyun  * Create qm related debugfs files.
2808*4882a593Smuzhiyun  */
2809*4882a593Smuzhiyun int hisi_qm_debug_init(struct hisi_qm *qm)
2810*4882a593Smuzhiyun {
2811*4882a593Smuzhiyun 	struct qm_dfx *dfx = &qm->debug.dfx;
2812*4882a593Smuzhiyun 	struct dentry *qm_d;
2813*4882a593Smuzhiyun 	void *data;
2814*4882a593Smuzhiyun 	int i, ret;
2815*4882a593Smuzhiyun 
2816*4882a593Smuzhiyun 	qm_d = debugfs_create_dir("qm", qm->debug.debug_root);
2817*4882a593Smuzhiyun 	qm->debug.qm_d = qm_d;
2818*4882a593Smuzhiyun 
2819*4882a593Smuzhiyun 	/* only show this in PF */
2820*4882a593Smuzhiyun 	if (qm->fun_type == QM_HW_PF)
2821*4882a593Smuzhiyun 		for (i = CURRENT_Q; i < DEBUG_FILE_NUM; i++)
2822*4882a593Smuzhiyun 			if (qm_create_debugfs_file(qm, i)) {
2823*4882a593Smuzhiyun 				ret = -ENOENT;
2824*4882a593Smuzhiyun 				goto failed_to_create;
2825*4882a593Smuzhiyun 			}
2826*4882a593Smuzhiyun 
2827*4882a593Smuzhiyun 	debugfs_create_file("regs", 0444, qm->debug.qm_d, qm, &qm_regs_fops);
2828*4882a593Smuzhiyun 
2829*4882a593Smuzhiyun 	debugfs_create_file("cmd", 0444, qm->debug.qm_d, qm, &qm_cmd_fops);
2830*4882a593Smuzhiyun 
2831*4882a593Smuzhiyun 	debugfs_create_file("status", 0444, qm->debug.qm_d, qm,
2832*4882a593Smuzhiyun 			&qm_status_fops);
2833*4882a593Smuzhiyun 	for (i = 0; i < ARRAY_SIZE(qm_dfx_files); i++) {
2834*4882a593Smuzhiyun 		data = (atomic64_t *)((uintptr_t)dfx + qm_dfx_files[i].offset);
2835*4882a593Smuzhiyun 		debugfs_create_file(qm_dfx_files[i].name, 0644, qm_d, data,
2836*4882a593Smuzhiyun 				    &qm_atomic64_ops);
2840*4882a593Smuzhiyun 	}
2841*4882a593Smuzhiyun 
2842*4882a593Smuzhiyun 	return 0;
2843*4882a593Smuzhiyun 
2844*4882a593Smuzhiyun failed_to_create:
2845*4882a593Smuzhiyun 	debugfs_remove_recursive(qm_d);
2846*4882a593Smuzhiyun 	return ret;
2847*4882a593Smuzhiyun }
2848*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(hisi_qm_debug_init);
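
/*
 * Editor's sketch: debug_root must be set up before hisi_qm_debug_init();
 * the "example_accel" directory name is an assumption.
 */
static int example_debugfs_setup(struct hisi_qm *qm)
{
	qm->debug.debug_root = debugfs_create_dir("example_accel", NULL);

	return hisi_qm_debug_init(qm);
}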
2849*4882a593Smuzhiyun 
2850*4882a593Smuzhiyun /**
2851*4882a593Smuzhiyun  * hisi_qm_debug_regs_clear() - clear qm debug related registers.
2852*4882a593Smuzhiyun  * @qm: The qm for which we want to clear its debug registers.
2853*4882a593Smuzhiyun  */
2854*4882a593Smuzhiyun void hisi_qm_debug_regs_clear(struct hisi_qm *qm)
2855*4882a593Smuzhiyun {
2856*4882a593Smuzhiyun 	struct qm_dfx_registers *regs;
2857*4882a593Smuzhiyun 	int i;
2858*4882a593Smuzhiyun 
2859*4882a593Smuzhiyun 	/* clear current_q */
2860*4882a593Smuzhiyun 	writel(0x0, qm->io_base + QM_DFX_SQE_CNT_VF_SQN);
2861*4882a593Smuzhiyun 	writel(0x0, qm->io_base + QM_DFX_CQE_CNT_VF_CQN);
2862*4882a593Smuzhiyun 
2863*4882a593Smuzhiyun 	/*
2864*4882a593Smuzhiyun 	 * These registers are cleared on read while QM_DFX_CNT_CLR_CE is
2865*4882a593Smuzhiyun 	 * set, so reading them below clears them.
2866*4882a593Smuzhiyun 	 */
2867*4882a593Smuzhiyun 	writel(0x1, qm->io_base + QM_DFX_CNT_CLR_CE);
2868*4882a593Smuzhiyun 
2869*4882a593Smuzhiyun 	regs = qm_dfx_regs;
2870*4882a593Smuzhiyun 	for (i = 0; i < CNT_CYC_REGS_NUM; i++) {
2871*4882a593Smuzhiyun 		readl(qm->io_base + regs->reg_offset);
2872*4882a593Smuzhiyun 		regs++;
2873*4882a593Smuzhiyun 	}
2874*4882a593Smuzhiyun 
2875*4882a593Smuzhiyun 	writel(0x0, qm->io_base + QM_DFX_CNT_CLR_CE);
2876*4882a593Smuzhiyun }
2877*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(hisi_qm_debug_regs_clear);
2878*4882a593Smuzhiyun 
2879*4882a593Smuzhiyun static void qm_hw_error_init(struct hisi_qm *qm)
2880*4882a593Smuzhiyun {
2881*4882a593Smuzhiyun 	const struct hisi_qm_err_info *err_info = &qm->err_ini->err_info;
2882*4882a593Smuzhiyun 
2883*4882a593Smuzhiyun 	if (!qm->ops->hw_error_init) {
2884*4882a593Smuzhiyun 		dev_err(&qm->pdev->dev, "QM doesn't support hw error handling!\n");
2885*4882a593Smuzhiyun 		return;
2886*4882a593Smuzhiyun 	}
2887*4882a593Smuzhiyun 
2888*4882a593Smuzhiyun 	qm->ops->hw_error_init(qm, err_info->ce, err_info->nfe, err_info->fe);
2889*4882a593Smuzhiyun }
2890*4882a593Smuzhiyun 
2891*4882a593Smuzhiyun static void qm_hw_error_uninit(struct hisi_qm *qm)
2892*4882a593Smuzhiyun {
2893*4882a593Smuzhiyun 	if (!qm->ops->hw_error_uninit) {
2894*4882a593Smuzhiyun 		dev_err(&qm->pdev->dev, "Unexpected QM hw error uninit!\n");
2895*4882a593Smuzhiyun 		return;
2896*4882a593Smuzhiyun 	}
2897*4882a593Smuzhiyun 
2898*4882a593Smuzhiyun 	qm->ops->hw_error_uninit(qm);
2899*4882a593Smuzhiyun }
2900*4882a593Smuzhiyun 
2901*4882a593Smuzhiyun static enum acc_err_result qm_hw_error_handle(struct hisi_qm *qm)
2902*4882a593Smuzhiyun {
2903*4882a593Smuzhiyun 	if (!qm->ops->hw_error_handle) {
2904*4882a593Smuzhiyun 		dev_err(&qm->pdev->dev, "QM doesn't support hw error report!\n");
2905*4882a593Smuzhiyun 		return ACC_ERR_NONE;
2906*4882a593Smuzhiyun 	}
2907*4882a593Smuzhiyun 
2908*4882a593Smuzhiyun 	return qm->ops->hw_error_handle(qm);
2909*4882a593Smuzhiyun }
2910*4882a593Smuzhiyun 
2911*4882a593Smuzhiyun /**
2912*4882a593Smuzhiyun  * hisi_qm_dev_err_init() - Initialize device error configuration.
2913*4882a593Smuzhiyun  * @qm: The qm for which we want to do error initialization.
2914*4882a593Smuzhiyun  *
2915*4882a593Smuzhiyun  * Initialize QM and device error related configuration.
2916*4882a593Smuzhiyun  */
2917*4882a593Smuzhiyun void hisi_qm_dev_err_init(struct hisi_qm *qm)
2918*4882a593Smuzhiyun {
2919*4882a593Smuzhiyun 	if (qm->fun_type == QM_HW_VF)
2920*4882a593Smuzhiyun 		return;
2921*4882a593Smuzhiyun 
2922*4882a593Smuzhiyun 	qm_hw_error_init(qm);
2923*4882a593Smuzhiyun 
2924*4882a593Smuzhiyun 	if (!qm->err_ini->hw_err_enable) {
2925*4882a593Smuzhiyun 		dev_err(&qm->pdev->dev, "Device doesn't support hw error init!\n");
2926*4882a593Smuzhiyun 		return;
2927*4882a593Smuzhiyun 	}
2928*4882a593Smuzhiyun 	qm->err_ini->hw_err_enable(qm);
2929*4882a593Smuzhiyun }
2930*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(hisi_qm_dev_err_init);
2931*4882a593Smuzhiyun 
2932*4882a593Smuzhiyun /**
2933*4882a593Smuzhiyun  * hisi_qm_dev_err_uninit() - Uninitialize device error configuration.
2934*4882a593Smuzhiyun  * @qm: The qm for which we want to do error uninitialization.
2935*4882a593Smuzhiyun  *
2936*4882a593Smuzhiyun  * Uninitialize QM and device error related configuration.
2937*4882a593Smuzhiyun  */
2938*4882a593Smuzhiyun void hisi_qm_dev_err_uninit(struct hisi_qm *qm)
2939*4882a593Smuzhiyun {
2940*4882a593Smuzhiyun 	if (qm->fun_type == QM_HW_VF)
2941*4882a593Smuzhiyun 		return;
2942*4882a593Smuzhiyun 
2943*4882a593Smuzhiyun 	qm_hw_error_uninit(qm);
2944*4882a593Smuzhiyun 
2945*4882a593Smuzhiyun 	if (!qm->err_ini->hw_err_disable) {
2946*4882a593Smuzhiyun 		dev_err(&qm->pdev->dev, "Unexpected device hw error uninit!\n");
2947*4882a593Smuzhiyun 		return;
2948*4882a593Smuzhiyun 	}
2949*4882a593Smuzhiyun 	qm->err_ini->hw_err_disable(qm);
2950*4882a593Smuzhiyun }
2951*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(hisi_qm_dev_err_uninit);
2952*4882a593Smuzhiyun 
2953*4882a593Smuzhiyun /**
2954*4882a593Smuzhiyun  * hisi_qm_free_qps() - free multiple queue pairs.
2955*4882a593Smuzhiyun  * @qps: The queue pairs need to be freed.
2956*4882a593Smuzhiyun  * @qp_num: The num of queue pairs.
2957*4882a593Smuzhiyun  */
2958*4882a593Smuzhiyun void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num)
2959*4882a593Smuzhiyun {
2960*4882a593Smuzhiyun 	int i;
2961*4882a593Smuzhiyun 
2962*4882a593Smuzhiyun 	if (!qps || qp_num <= 0)
2963*4882a593Smuzhiyun 		return;
2964*4882a593Smuzhiyun 
2965*4882a593Smuzhiyun 	for (i = qp_num - 1; i >= 0; i--)
2966*4882a593Smuzhiyun 		hisi_qm_release_qp(qps[i]);
2967*4882a593Smuzhiyun }
2968*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(hisi_qm_free_qps);
2969*4882a593Smuzhiyun 
2970*4882a593Smuzhiyun static void free_list(struct list_head *head)
2971*4882a593Smuzhiyun {
2972*4882a593Smuzhiyun 	struct hisi_qm_resource *res, *tmp;
2973*4882a593Smuzhiyun 
2974*4882a593Smuzhiyun 	list_for_each_entry_safe(res, tmp, head, list) {
2975*4882a593Smuzhiyun 		list_del(&res->list);
2976*4882a593Smuzhiyun 		kfree(res);
2977*4882a593Smuzhiyun 	}
2978*4882a593Smuzhiyun }
2979*4882a593Smuzhiyun 
2980*4882a593Smuzhiyun static int hisi_qm_sort_devices(int node, struct list_head *head,
2981*4882a593Smuzhiyun 				struct hisi_qm_list *qm_list)
2982*4882a593Smuzhiyun {
2983*4882a593Smuzhiyun 	struct hisi_qm_resource *res, *tmp;
2984*4882a593Smuzhiyun 	struct hisi_qm *qm;
2985*4882a593Smuzhiyun 	struct list_head *n;
2986*4882a593Smuzhiyun 	struct device *dev;
2987*4882a593Smuzhiyun 	int dev_node = 0;
2988*4882a593Smuzhiyun 
2989*4882a593Smuzhiyun 	list_for_each_entry(qm, &qm_list->list, list) {
2990*4882a593Smuzhiyun 		dev = &qm->pdev->dev;
2991*4882a593Smuzhiyun 
2992*4882a593Smuzhiyun 		if (IS_ENABLED(CONFIG_NUMA)) {
2993*4882a593Smuzhiyun 			dev_node = dev_to_node(dev);
2994*4882a593Smuzhiyun 			if (dev_node < 0)
2995*4882a593Smuzhiyun 				dev_node = 0;
2996*4882a593Smuzhiyun 		}
2997*4882a593Smuzhiyun 
2998*4882a593Smuzhiyun 		res = kzalloc(sizeof(*res), GFP_KERNEL);
2999*4882a593Smuzhiyun 		if (!res)
3000*4882a593Smuzhiyun 			return -ENOMEM;
3001*4882a593Smuzhiyun 
3002*4882a593Smuzhiyun 		res->qm = qm;
3003*4882a593Smuzhiyun 		res->distance = node_distance(dev_node, node);
3004*4882a593Smuzhiyun 		n = head;
3005*4882a593Smuzhiyun 		list_for_each_entry(tmp, head, list) {
3006*4882a593Smuzhiyun 			if (res->distance < tmp->distance) {
3007*4882a593Smuzhiyun 				n = &tmp->list;
3008*4882a593Smuzhiyun 				break;
3009*4882a593Smuzhiyun 			}
3010*4882a593Smuzhiyun 		}
3011*4882a593Smuzhiyun 		list_add_tail(&res->list, n);
3012*4882a593Smuzhiyun 	}
3013*4882a593Smuzhiyun 
3014*4882a593Smuzhiyun 	return 0;
3015*4882a593Smuzhiyun }
3016*4882a593Smuzhiyun 
3017*4882a593Smuzhiyun /**
3018*4882a593Smuzhiyun  * hisi_qm_alloc_qps_node() - Create multiple queue pairs.
3019*4882a593Smuzhiyun  * @qm_list: The list of all available devices.
3020*4882a593Smuzhiyun  * @qp_num: The number of queue pairs to be created.
3021*4882a593Smuzhiyun  * @alg_type: The algorithm type.
3022*4882a593Smuzhiyun  * @node: The numa node.
3023*4882a593Smuzhiyun  * @qps: Output array for the created queue pairs.
3024*4882a593Smuzhiyun  *
3025*4882a593Smuzhiyun  * This function sorts all available devices according to NUMA distance, then
3026*4882a593Smuzhiyun  * tries to create all queue pairs from a single device. If no device meets
3027*4882a593Smuzhiyun  * the requirements, it returns an error.
3028*4882a593Smuzhiyun  */
3029*4882a593Smuzhiyun int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num,
3030*4882a593Smuzhiyun 			   u8 alg_type, int node, struct hisi_qp **qps)
3031*4882a593Smuzhiyun {
3032*4882a593Smuzhiyun 	struct hisi_qm_resource *tmp;
3033*4882a593Smuzhiyun 	int ret = -ENODEV;
3034*4882a593Smuzhiyun 	LIST_HEAD(head);
3035*4882a593Smuzhiyun 	int i;
3036*4882a593Smuzhiyun 
3037*4882a593Smuzhiyun 	if (!qps || !qm_list || qp_num <= 0)
3038*4882a593Smuzhiyun 		return -EINVAL;
3039*4882a593Smuzhiyun 
3040*4882a593Smuzhiyun 	mutex_lock(&qm_list->lock);
3041*4882a593Smuzhiyun 	if (hisi_qm_sort_devices(node, &head, qm_list)) {
3042*4882a593Smuzhiyun 		mutex_unlock(&qm_list->lock);
3043*4882a593Smuzhiyun 		goto err;
3044*4882a593Smuzhiyun 	}
3045*4882a593Smuzhiyun 
3046*4882a593Smuzhiyun 	list_for_each_entry(tmp, &head, list) {
3047*4882a593Smuzhiyun 		for (i = 0; i < qp_num; i++) {
3048*4882a593Smuzhiyun 			qps[i] = hisi_qm_create_qp(tmp->qm, alg_type);
3049*4882a593Smuzhiyun 			if (IS_ERR(qps[i])) {
3050*4882a593Smuzhiyun 				hisi_qm_free_qps(qps, i);
3051*4882a593Smuzhiyun 				break;
3052*4882a593Smuzhiyun 			}
3053*4882a593Smuzhiyun 		}
3054*4882a593Smuzhiyun 
3055*4882a593Smuzhiyun 		if (i == qp_num) {
3056*4882a593Smuzhiyun 			ret = 0;
3057*4882a593Smuzhiyun 			break;
3058*4882a593Smuzhiyun 		}
3059*4882a593Smuzhiyun 	}
3060*4882a593Smuzhiyun 
3061*4882a593Smuzhiyun 	mutex_unlock(&qm_list->lock);
3062*4882a593Smuzhiyun 	if (ret)
3063*4882a593Smuzhiyun 		pr_info("Failed to create qps, node[%d], alg[%d], qp[%d]!\n",
3064*4882a593Smuzhiyun 			node, alg_type, qp_num);
3065*4882a593Smuzhiyun 
3066*4882a593Smuzhiyun err:
3067*4882a593Smuzhiyun 	free_list(&head);
3068*4882a593Smuzhiyun 	return ret;
3069*4882a593Smuzhiyun }
3070*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(hisi_qm_alloc_qps_node);
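
/*
 * Editor's sketch: allocating a send/receive qp pair close to the caller's
 * NUMA node; example_qm_list and alg_type 0 are assumptions.
 */
static int example_get_qp_pair(struct hisi_qm_list *example_qm_list,
			       struct hisi_qp *qps[2])
{
	return hisi_qm_alloc_qps_node(example_qm_list, 2, 0,
				      numa_node_id(), qps);
}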
3071*4882a593Smuzhiyun 
3072*4882a593Smuzhiyun static int qm_vf_q_assign(struct hisi_qm *qm, u32 num_vfs)
3073*4882a593Smuzhiyun {
3074*4882a593Smuzhiyun 	u32 remain_q_num, q_num, i, j;
3075*4882a593Smuzhiyun 	u32 q_base = qm->qp_num;
3076*4882a593Smuzhiyun 	int ret;
3077*4882a593Smuzhiyun 
3078*4882a593Smuzhiyun 	if (!num_vfs)
3079*4882a593Smuzhiyun 		return -EINVAL;
3080*4882a593Smuzhiyun 
3081*4882a593Smuzhiyun 	remain_q_num = qm->ctrl_qp_num - qm->qp_num;
3082*4882a593Smuzhiyun 
3083*4882a593Smuzhiyun 	/* If the remaining queues are not enough, return an error. */
3084*4882a593Smuzhiyun 	if (qm->ctrl_qp_num < qm->qp_num || remain_q_num < num_vfs)
3085*4882a593Smuzhiyun 		return -EINVAL;
3086*4882a593Smuzhiyun 
3087*4882a593Smuzhiyun 	q_num = remain_q_num / num_vfs;
3088*4882a593Smuzhiyun 	for (i = 1; i <= num_vfs; i++) {
3089*4882a593Smuzhiyun 		if (i == num_vfs)
3090*4882a593Smuzhiyun 			q_num += remain_q_num % num_vfs;
3091*4882a593Smuzhiyun 		ret = hisi_qm_set_vft(qm, i, q_base, q_num);
3092*4882a593Smuzhiyun 		if (ret) {
3093*4882a593Smuzhiyun 			for (j = i; j > 0; j--)
3094*4882a593Smuzhiyun 				hisi_qm_set_vft(qm, j, 0, 0);
3095*4882a593Smuzhiyun 			return ret;
3096*4882a593Smuzhiyun 		}
3097*4882a593Smuzhiyun 		q_base += q_num;
3098*4882a593Smuzhiyun 	}
3099*4882a593Smuzhiyun 
3100*4882a593Smuzhiyun 	return 0;
3101*4882a593Smuzhiyun }
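
/*
 * Editor's note (worked example, values assumed): if ctrl_qp_num = 64 and
 * the PF keeps qp_num = 10, then remain_q_num = 54. With num_vfs = 4,
 * q_num = 54 / 4 = 13, so VF1..VF3 each get 13 queues and VF4 gets
 * 13 + 54 % 4 = 15; q_base starts at 10 and advances by q_num per VF.
 */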
3102*4882a593Smuzhiyun 
3103*4882a593Smuzhiyun static int qm_clear_vft_config(struct hisi_qm *qm)
3104*4882a593Smuzhiyun {
3105*4882a593Smuzhiyun 	int ret;
3106*4882a593Smuzhiyun 	u32 i;
3107*4882a593Smuzhiyun 
3108*4882a593Smuzhiyun 	for (i = 1; i <= qm->vfs_num; i++) {
3109*4882a593Smuzhiyun 		ret = hisi_qm_set_vft(qm, i, 0, 0);
3110*4882a593Smuzhiyun 		if (ret)
3111*4882a593Smuzhiyun 			return ret;
3112*4882a593Smuzhiyun 	}
3113*4882a593Smuzhiyun 	qm->vfs_num = 0;
3114*4882a593Smuzhiyun 
3115*4882a593Smuzhiyun 	return 0;
3116*4882a593Smuzhiyun }
3117*4882a593Smuzhiyun 
3118*4882a593Smuzhiyun /**
3119*4882a593Smuzhiyun  * hisi_qm_sriov_enable() - enable virtual functions
3120*4882a593Smuzhiyun  * @pdev: the PCIe device
3121*4882a593Smuzhiyun  * @max_vfs: the number of virtual functions to enable
3122*4882a593Smuzhiyun  *
3123*4882a593Smuzhiyun  * Returns the number of enabled VFs. If VFs are already enabled, or if
3124*4882a593Smuzhiyun  * max_vfs is more than the number of VFs the device can enable, this
3125*4882a593Smuzhiyun  * returns failure.
3126*4882a593Smuzhiyun  */
3127*4882a593Smuzhiyun int hisi_qm_sriov_enable(struct pci_dev *pdev, int max_vfs)
3128*4882a593Smuzhiyun {
3129*4882a593Smuzhiyun 	struct hisi_qm *qm = pci_get_drvdata(pdev);
3130*4882a593Smuzhiyun 	int pre_existing_vfs, num_vfs, total_vfs, ret;
3131*4882a593Smuzhiyun 
3132*4882a593Smuzhiyun 	total_vfs = pci_sriov_get_totalvfs(pdev);
3133*4882a593Smuzhiyun 	pre_existing_vfs = pci_num_vf(pdev);
3134*4882a593Smuzhiyun 	if (pre_existing_vfs) {
3135*4882a593Smuzhiyun 		pci_err(pdev, "%d VFs already enabled. Please disable pre-enabled VFs!\n",
3136*4882a593Smuzhiyun 			pre_existing_vfs);
3137*4882a593Smuzhiyun 		return 0;
3138*4882a593Smuzhiyun 	}
3139*4882a593Smuzhiyun 
3140*4882a593Smuzhiyun 	num_vfs = min_t(int, max_vfs, total_vfs);
3141*4882a593Smuzhiyun 	ret = qm_vf_q_assign(qm, num_vfs);
3142*4882a593Smuzhiyun 	if (ret) {
3143*4882a593Smuzhiyun 		pci_err(pdev, "Can't assign queues for VF!\n");
3144*4882a593Smuzhiyun 		return ret;
3145*4882a593Smuzhiyun 	}
3146*4882a593Smuzhiyun 
3147*4882a593Smuzhiyun 	qm->vfs_num = num_vfs;
3148*4882a593Smuzhiyun 
3149*4882a593Smuzhiyun 	ret = pci_enable_sriov(pdev, num_vfs);
3150*4882a593Smuzhiyun 	if (ret) {
3151*4882a593Smuzhiyun 		pci_err(pdev, "Can't enable VF!\n");
3152*4882a593Smuzhiyun 		qm_clear_vft_config(qm);
3153*4882a593Smuzhiyun 		return ret;
3154*4882a593Smuzhiyun 	}
3155*4882a593Smuzhiyun 
3156*4882a593Smuzhiyun 	pci_info(pdev, "VF enabled, vfs_num(=%d)!\n", num_vfs);
3157*4882a593Smuzhiyun 
3158*4882a593Smuzhiyun 	return num_vfs;
3159*4882a593Smuzhiyun }
3160*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(hisi_qm_sriov_enable);
3161*4882a593Smuzhiyun 
3162*4882a593Smuzhiyun /**
3163*4882a593Smuzhiyun  * hisi_qm_sriov_disable - disable virtual functions
3164*4882a593Smuzhiyun  * @pdev: the PCI device.
3165*4882a593Smuzhiyun  * @is_frozen: true when all the VFs are frozen.
3166*4882a593Smuzhiyun  *
3167*4882a593Smuzhiyun  * Return failure if VFs are already assigned or a VF is in use.
3168*4882a593Smuzhiyun  */
3169*4882a593Smuzhiyun int hisi_qm_sriov_disable(struct pci_dev *pdev, bool is_frozen)
3170*4882a593Smuzhiyun {
3171*4882a593Smuzhiyun 	struct hisi_qm *qm = pci_get_drvdata(pdev);
3172*4882a593Smuzhiyun 
3173*4882a593Smuzhiyun 	if (pci_vfs_assigned(pdev)) {
3174*4882a593Smuzhiyun 		pci_err(pdev, "Failed to disable VFs as VFs are assigned!\n");
3175*4882a593Smuzhiyun 		return -EPERM;
3176*4882a593Smuzhiyun 	}
3177*4882a593Smuzhiyun 
3178*4882a593Smuzhiyun 	/* While a VF is in use, SR-IOV cannot be disabled. */
3179*4882a593Smuzhiyun 	if (!is_frozen && qm_try_frozen_vfs(pdev, qm->qm_list)) {
3180*4882a593Smuzhiyun 		pci_err(pdev, "Task is using its VF!\n");
3181*4882a593Smuzhiyun 		return -EBUSY;
3182*4882a593Smuzhiyun 	}
3183*4882a593Smuzhiyun 
3184*4882a593Smuzhiyun 	pci_disable_sriov(pdev);
3185*4882a593Smuzhiyun 	return qm_clear_vft_config(qm);
3186*4882a593Smuzhiyun }
3187*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(hisi_qm_sriov_disable);
3188*4882a593Smuzhiyun 
3189*4882a593Smuzhiyun /**
3190*4882a593Smuzhiyun  * hisi_qm_sriov_configure - configure the number of VFs
3191*4882a593Smuzhiyun  * @pdev: The PCI device
3192*4882a593Smuzhiyun  * @num_vfs: The number of VFs to be enabled
3193*4882a593Smuzhiyun  *
3194*4882a593Smuzhiyun  * Enable SR-IOV according to num_vfs, 0 means disable.
3195*4882a593Smuzhiyun  */
3196*4882a593Smuzhiyun int hisi_qm_sriov_configure(struct pci_dev *pdev, int num_vfs)
3197*4882a593Smuzhiyun {
3198*4882a593Smuzhiyun 	if (num_vfs == 0)
3199*4882a593Smuzhiyun 		return hisi_qm_sriov_disable(pdev, 0);
3200*4882a593Smuzhiyun 	else
3201*4882a593Smuzhiyun 		return hisi_qm_sriov_enable(pdev, num_vfs);
3202*4882a593Smuzhiyun }
3203*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(hisi_qm_sriov_configure);
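
/*
 * Editor's sketch: hisi_qm_sriov_configure() is shaped to back the standard
 * pci_driver sriov_configure hook; example_pci_driver and its name string
 * are assumptions.
 */
static struct pci_driver example_pci_driver = {
	.name		 = "example_accel",
	.sriov_configure = hisi_qm_sriov_configure,
};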
3204*4882a593Smuzhiyun 
3205*4882a593Smuzhiyun static enum acc_err_result qm_dev_err_handle(struct hisi_qm *qm)
3206*4882a593Smuzhiyun {
3207*4882a593Smuzhiyun 	u32 err_sts;
3208*4882a593Smuzhiyun 
3209*4882a593Smuzhiyun 	if (!qm->err_ini->get_dev_hw_err_status) {
3210*4882a593Smuzhiyun 		dev_err(&qm->pdev->dev, "Device doesn't support get hw error status!\n");
3211*4882a593Smuzhiyun 		return ACC_ERR_NONE;
3212*4882a593Smuzhiyun 	}
3213*4882a593Smuzhiyun 
3214*4882a593Smuzhiyun 	/* get device hardware error status */
3215*4882a593Smuzhiyun 	err_sts = qm->err_ini->get_dev_hw_err_status(qm);
3216*4882a593Smuzhiyun 	if (err_sts) {
3217*4882a593Smuzhiyun 		if (err_sts & qm->err_ini->err_info.ecc_2bits_mask)
3218*4882a593Smuzhiyun 			qm->err_status.is_dev_ecc_mbit = true;
3219*4882a593Smuzhiyun 
3220*4882a593Smuzhiyun 		if (!qm->err_ini->log_dev_hw_err) {
3221*4882a593Smuzhiyun 			dev_err(&qm->pdev->dev, "Device doesn't support log hw error!\n");
3222*4882a593Smuzhiyun 			return ACC_ERR_NEED_RESET;
3223*4882a593Smuzhiyun 		}
3224*4882a593Smuzhiyun 
3225*4882a593Smuzhiyun 		qm->err_ini->log_dev_hw_err(qm, err_sts);
3226*4882a593Smuzhiyun 		return ACC_ERR_NEED_RESET;
3227*4882a593Smuzhiyun 	}
3228*4882a593Smuzhiyun 
3229*4882a593Smuzhiyun 	return ACC_ERR_RECOVERED;
3230*4882a593Smuzhiyun }
3231*4882a593Smuzhiyun 
3232*4882a593Smuzhiyun static enum acc_err_result qm_process_dev_error(struct hisi_qm *qm)
3233*4882a593Smuzhiyun {
3234*4882a593Smuzhiyun 	enum acc_err_result qm_ret, dev_ret;
3235*4882a593Smuzhiyun 
3236*4882a593Smuzhiyun 	/* log qm error */
3237*4882a593Smuzhiyun 	qm_ret = qm_hw_error_handle(qm);
3238*4882a593Smuzhiyun 
3239*4882a593Smuzhiyun 	/* log device error */
3240*4882a593Smuzhiyun 	dev_ret = qm_dev_err_handle(qm);
3241*4882a593Smuzhiyun 
3242*4882a593Smuzhiyun 	return (qm_ret == ACC_ERR_NEED_RESET ||
3243*4882a593Smuzhiyun 		dev_ret == ACC_ERR_NEED_RESET) ?
3244*4882a593Smuzhiyun 		ACC_ERR_NEED_RESET : ACC_ERR_RECOVERED;
3245*4882a593Smuzhiyun }
3246*4882a593Smuzhiyun 
3247*4882a593Smuzhiyun /**
3248*4882a593Smuzhiyun  * hisi_qm_dev_err_detected() - Get device and qm error status then log it.
3249*4882a593Smuzhiyun  * @pdev: The PCI device which needs to report the error.
3250*4882a593Smuzhiyun  * @state: The connectivity between CPU and device.
3251*4882a593Smuzhiyun  *
3252*4882a593Smuzhiyun  * We register this function in the PCIe AER handlers. It reports the device
3253*4882a593Smuzhiyun  * or qm hardware error status when an error occurs.
3254*4882a593Smuzhiyun  */
3255*4882a593Smuzhiyun pci_ers_result_t hisi_qm_dev_err_detected(struct pci_dev *pdev,
3256*4882a593Smuzhiyun 					  pci_channel_state_t state)
3257*4882a593Smuzhiyun {
3258*4882a593Smuzhiyun 	struct hisi_qm *qm = pci_get_drvdata(pdev);
3259*4882a593Smuzhiyun 	enum acc_err_result ret;
3260*4882a593Smuzhiyun 
3261*4882a593Smuzhiyun 	if (pdev->is_virtfn)
3262*4882a593Smuzhiyun 		return PCI_ERS_RESULT_NONE;
3263*4882a593Smuzhiyun 
3264*4882a593Smuzhiyun 	pci_info(pdev, "PCI error detected, state(=%d)!!\n", state);
3265*4882a593Smuzhiyun 	if (state == pci_channel_io_perm_failure)
3266*4882a593Smuzhiyun 		return PCI_ERS_RESULT_DISCONNECT;
3267*4882a593Smuzhiyun 
3268*4882a593Smuzhiyun 	ret = qm_process_dev_error(qm);
3269*4882a593Smuzhiyun 	if (ret == ACC_ERR_NEED_RESET)
3270*4882a593Smuzhiyun 		return PCI_ERS_RESULT_NEED_RESET;
3271*4882a593Smuzhiyun 
3272*4882a593Smuzhiyun 	return PCI_ERS_RESULT_RECOVERED;
3273*4882a593Smuzhiyun }
3274*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(hisi_qm_dev_err_detected);
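
/*
 * Editor's sketch: hisi_qm_dev_err_detected() slots into the standard PCI
 * error handlers; example_err_handler is hypothetical, and a real driver
 * would also wire slot_reset and the FLR reset_prepare/reset_done hooks.
 */
static const struct pci_error_handlers example_err_handler = {
	.error_detected = hisi_qm_dev_err_detected,
};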
3275*4882a593Smuzhiyun 
3276*4882a593Smuzhiyun static int qm_get_hw_error_status(struct hisi_qm *qm)
3277*4882a593Smuzhiyun {
3278*4882a593Smuzhiyun 	return readl(qm->io_base + QM_ABNORMAL_INT_STATUS);
3279*4882a593Smuzhiyun }
3280*4882a593Smuzhiyun 
3281*4882a593Smuzhiyun static int qm_check_req_recv(struct hisi_qm *qm)
3282*4882a593Smuzhiyun {
3283*4882a593Smuzhiyun 	struct pci_dev *pdev = qm->pdev;
3284*4882a593Smuzhiyun 	int ret;
3285*4882a593Smuzhiyun 	u32 val;
3286*4882a593Smuzhiyun 
3287*4882a593Smuzhiyun 	writel(ACC_VENDOR_ID_VALUE, qm->io_base + QM_PEH_VENDOR_ID);
3288*4882a593Smuzhiyun 	ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val,
3289*4882a593Smuzhiyun 					 (val == ACC_VENDOR_ID_VALUE),
3290*4882a593Smuzhiyun 					 POLL_PERIOD, POLL_TIMEOUT);
3291*4882a593Smuzhiyun 	if (ret) {
3292*4882a593Smuzhiyun 		dev_err(&pdev->dev, "Failed to read QM reg!\n");
3293*4882a593Smuzhiyun 		return ret;
3294*4882a593Smuzhiyun 	}
3295*4882a593Smuzhiyun 
3296*4882a593Smuzhiyun 	writel(PCI_VENDOR_ID_HUAWEI, qm->io_base + QM_PEH_VENDOR_ID);
3297*4882a593Smuzhiyun 	ret = readl_relaxed_poll_timeout(qm->io_base + QM_PEH_VENDOR_ID, val,
3298*4882a593Smuzhiyun 					 (val == PCI_VENDOR_ID_HUAWEI),
3299*4882a593Smuzhiyun 					 POLL_PERIOD, POLL_TIMEOUT);
3300*4882a593Smuzhiyun 	if (ret)
3301*4882a593Smuzhiyun 		dev_err(&pdev->dev, "Failed to read QM reg the second time!\n");
3302*4882a593Smuzhiyun 
3303*4882a593Smuzhiyun 	return ret;
3304*4882a593Smuzhiyun }
3305*4882a593Smuzhiyun 
3306*4882a593Smuzhiyun static int qm_set_pf_mse(struct hisi_qm *qm, bool set)
3307*4882a593Smuzhiyun {
3308*4882a593Smuzhiyun 	struct pci_dev *pdev = qm->pdev;
3309*4882a593Smuzhiyun 	u16 cmd;
3310*4882a593Smuzhiyun 	int i;
3311*4882a593Smuzhiyun 
3312*4882a593Smuzhiyun 	pci_read_config_word(pdev, PCI_COMMAND, &cmd);
3313*4882a593Smuzhiyun 	if (set)
3314*4882a593Smuzhiyun 		cmd |= PCI_COMMAND_MEMORY;
3315*4882a593Smuzhiyun 	else
3316*4882a593Smuzhiyun 		cmd &= ~PCI_COMMAND_MEMORY;
3317*4882a593Smuzhiyun 
3318*4882a593Smuzhiyun 	pci_write_config_word(pdev, PCI_COMMAND, cmd);
3319*4882a593Smuzhiyun 	for (i = 0; i < MAX_WAIT_COUNTS; i++) {
3320*4882a593Smuzhiyun 		pci_read_config_word(pdev, PCI_COMMAND, &cmd);
3321*4882a593Smuzhiyun 		if (set == ((cmd & PCI_COMMAND_MEMORY) >> 1))
3322*4882a593Smuzhiyun 			return 0;
3323*4882a593Smuzhiyun 
3324*4882a593Smuzhiyun 		udelay(1);
3325*4882a593Smuzhiyun 	}
3326*4882a593Smuzhiyun 
3327*4882a593Smuzhiyun 	return -ETIMEDOUT;
3328*4882a593Smuzhiyun }
3329*4882a593Smuzhiyun 
3330*4882a593Smuzhiyun static int qm_set_vf_mse(struct hisi_qm *qm, bool set)
3331*4882a593Smuzhiyun {
3332*4882a593Smuzhiyun 	struct pci_dev *pdev = qm->pdev;
3333*4882a593Smuzhiyun 	u16 sriov_ctrl;
3334*4882a593Smuzhiyun 	int pos;
3335*4882a593Smuzhiyun 	int i;
3336*4882a593Smuzhiyun 
3337*4882a593Smuzhiyun 	pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_SRIOV);
3338*4882a593Smuzhiyun 	pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &sriov_ctrl);
3339*4882a593Smuzhiyun 	if (set)
3340*4882a593Smuzhiyun 		sriov_ctrl |= PCI_SRIOV_CTRL_MSE;
3341*4882a593Smuzhiyun 	else
3342*4882a593Smuzhiyun 		sriov_ctrl &= ~PCI_SRIOV_CTRL_MSE;
3343*4882a593Smuzhiyun 	pci_write_config_word(pdev, pos + PCI_SRIOV_CTRL, sriov_ctrl);
3344*4882a593Smuzhiyun 
3345*4882a593Smuzhiyun 	for (i = 0; i < MAX_WAIT_COUNTS; i++) {
3346*4882a593Smuzhiyun 		pci_read_config_word(pdev, pos + PCI_SRIOV_CTRL, &sriov_ctrl);
3347*4882a593Smuzhiyun 		if (set == (sriov_ctrl & PCI_SRIOV_CTRL_MSE) >>
3348*4882a593Smuzhiyun 		    ACC_PEH_SRIOV_CTRL_VF_MSE_SHIFT)
3349*4882a593Smuzhiyun 			return 0;
3350*4882a593Smuzhiyun 
3351*4882a593Smuzhiyun 		udelay(1);
3352*4882a593Smuzhiyun 	}
3353*4882a593Smuzhiyun 
3354*4882a593Smuzhiyun 	return -ETIMEDOUT;
3355*4882a593Smuzhiyun }
3356*4882a593Smuzhiyun 
3357*4882a593Smuzhiyun static int qm_set_msi(struct hisi_qm *qm, bool set)
3358*4882a593Smuzhiyun {
3359*4882a593Smuzhiyun 	struct pci_dev *pdev = qm->pdev;
3360*4882a593Smuzhiyun 
3361*4882a593Smuzhiyun 	if (set) {
3362*4882a593Smuzhiyun 		pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64,
3363*4882a593Smuzhiyun 				       0);
3364*4882a593Smuzhiyun 	} else {
3365*4882a593Smuzhiyun 		pci_write_config_dword(pdev, pdev->msi_cap + PCI_MSI_MASK_64,
3366*4882a593Smuzhiyun 				       ACC_PEH_MSI_DISABLE);
3367*4882a593Smuzhiyun 		if (qm->err_status.is_qm_ecc_mbit ||
3368*4882a593Smuzhiyun 		    qm->err_status.is_dev_ecc_mbit)
3369*4882a593Smuzhiyun 			return 0;
3370*4882a593Smuzhiyun 
3371*4882a593Smuzhiyun 		mdelay(1);
3372*4882a593Smuzhiyun 		if (readl(qm->io_base + QM_PEH_DFX_INFO0))
3373*4882a593Smuzhiyun 			return -EFAULT;
3374*4882a593Smuzhiyun 	}
3375*4882a593Smuzhiyun 
3376*4882a593Smuzhiyun 	return 0;
3377*4882a593Smuzhiyun }
3378*4882a593Smuzhiyun 
3379*4882a593Smuzhiyun static int qm_vf_reset_prepare(struct hisi_qm *qm,
3380*4882a593Smuzhiyun 			       enum qm_stop_reason stop_reason)
3381*4882a593Smuzhiyun {
3382*4882a593Smuzhiyun 	struct hisi_qm_list *qm_list = qm->qm_list;
3383*4882a593Smuzhiyun 	struct pci_dev *pdev = qm->pdev;
3384*4882a593Smuzhiyun 	struct pci_dev *virtfn;
3385*4882a593Smuzhiyun 	struct hisi_qm *vf_qm;
3386*4882a593Smuzhiyun 	int ret = 0;
3387*4882a593Smuzhiyun 
3388*4882a593Smuzhiyun 	mutex_lock(&qm_list->lock);
3389*4882a593Smuzhiyun 	list_for_each_entry(vf_qm, &qm_list->list, list) {
3390*4882a593Smuzhiyun 		virtfn = vf_qm->pdev;
3391*4882a593Smuzhiyun 		if (virtfn == pdev)
3392*4882a593Smuzhiyun 			continue;
3393*4882a593Smuzhiyun 
3394*4882a593Smuzhiyun 		if (pci_physfn(virtfn) == pdev) {
3395*4882a593Smuzhiyun 			/* save VFs PCIE BAR configuration */
3396*4882a593Smuzhiyun 			/* save VFs PCIe BAR configuration */
3397*4882a593Smuzhiyun 
3398*4882a593Smuzhiyun 			ret = hisi_qm_stop(vf_qm, stop_reason);
3399*4882a593Smuzhiyun 			if (ret)
3400*4882a593Smuzhiyun 				goto stop_fail;
3401*4882a593Smuzhiyun 		}
3402*4882a593Smuzhiyun 	}
3403*4882a593Smuzhiyun 
3404*4882a593Smuzhiyun stop_fail:
3405*4882a593Smuzhiyun 	mutex_unlock(&qm_list->lock);
3406*4882a593Smuzhiyun 	return ret;
3407*4882a593Smuzhiyun }
3408*4882a593Smuzhiyun 
3409*4882a593Smuzhiyun static int qm_reset_prepare_ready(struct hisi_qm *qm)
3410*4882a593Smuzhiyun {
3411*4882a593Smuzhiyun 	struct pci_dev *pdev = qm->pdev;
3412*4882a593Smuzhiyun 	struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
3413*4882a593Smuzhiyun 	int delay = 0;
3414*4882a593Smuzhiyun 
3415*4882a593Smuzhiyun 	/* All reset requests need to be queued for processing */
3416*4882a593Smuzhiyun 	while (test_and_set_bit(QM_DEV_RESET_FLAG, &pf_qm->reset_flag)) {
3417*4882a593Smuzhiyun 		msleep(++delay);
3418*4882a593Smuzhiyun 		if (delay > QM_RESET_WAIT_TIMEOUT)
3419*4882a593Smuzhiyun 			return -EBUSY;
3420*4882a593Smuzhiyun 	}
3421*4882a593Smuzhiyun 
3422*4882a593Smuzhiyun 	return 0;
3423*4882a593Smuzhiyun }
3424*4882a593Smuzhiyun 
3425*4882a593Smuzhiyun static int qm_controller_reset_prepare(struct hisi_qm *qm)
3426*4882a593Smuzhiyun {
3427*4882a593Smuzhiyun 	struct pci_dev *pdev = qm->pdev;
3428*4882a593Smuzhiyun 	int ret;
3429*4882a593Smuzhiyun 
3430*4882a593Smuzhiyun 	ret = qm_reset_prepare_ready(qm);
3431*4882a593Smuzhiyun 	if (ret) {
3432*4882a593Smuzhiyun 		pci_err(pdev, "Controller reset not ready!\n");
3433*4882a593Smuzhiyun 		return ret;
3434*4882a593Smuzhiyun 	}
3435*4882a593Smuzhiyun 
3436*4882a593Smuzhiyun 	if (qm->vfs_num) {
3437*4882a593Smuzhiyun 		ret = qm_vf_reset_prepare(qm, QM_SOFT_RESET);
3438*4882a593Smuzhiyun 		if (ret) {
3439*4882a593Smuzhiyun 			pci_err(pdev, "Failed to stop VFs!\n");
3440*4882a593Smuzhiyun 			return ret;
3441*4882a593Smuzhiyun 		}
3442*4882a593Smuzhiyun 	}
3443*4882a593Smuzhiyun 
3444*4882a593Smuzhiyun 	ret = hisi_qm_stop(qm, QM_SOFT_RESET);
3445*4882a593Smuzhiyun 	if (ret) {
3446*4882a593Smuzhiyun 		pci_err(pdev, "Failed to stop QM!\n");
3447*4882a593Smuzhiyun 		return ret;
3448*4882a593Smuzhiyun 	}
3449*4882a593Smuzhiyun 
3450*4882a593Smuzhiyun 	return 0;
3451*4882a593Smuzhiyun }
3452*4882a593Smuzhiyun 
3453*4882a593Smuzhiyun static void qm_dev_ecc_mbit_handle(struct hisi_qm *qm)
3454*4882a593Smuzhiyun {
3455*4882a593Smuzhiyun 	u32 nfe_enb = 0;
3456*4882a593Smuzhiyun 
3457*4882a593Smuzhiyun 	if (!qm->err_status.is_dev_ecc_mbit &&
3458*4882a593Smuzhiyun 	    qm->err_status.is_qm_ecc_mbit &&
3459*4882a593Smuzhiyun 	    qm->err_ini->close_axi_master_ooo) {
3461*4882a593Smuzhiyun 		qm->err_ini->close_axi_master_ooo(qm);
3463*4882a593Smuzhiyun 	} else if (qm->err_status.is_dev_ecc_mbit &&
3464*4882a593Smuzhiyun 		   !qm->err_status.is_qm_ecc_mbit &&
3465*4882a593Smuzhiyun 		   !qm->err_ini->close_axi_master_ooo) {
3467*4882a593Smuzhiyun 		nfe_enb = readl(qm->io_base + QM_RAS_NFE_ENABLE);
3468*4882a593Smuzhiyun 		writel(nfe_enb & QM_RAS_NFE_MBIT_DISABLE,
3469*4882a593Smuzhiyun 		       qm->io_base + QM_RAS_NFE_ENABLE);
3470*4882a593Smuzhiyun 		writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SET);
3471*4882a593Smuzhiyun 	}
3472*4882a593Smuzhiyun }
3473*4882a593Smuzhiyun 
3474*4882a593Smuzhiyun static int qm_soft_reset(struct hisi_qm *qm)
3475*4882a593Smuzhiyun {
3476*4882a593Smuzhiyun 	struct pci_dev *pdev = qm->pdev;
3477*4882a593Smuzhiyun 	int ret;
3478*4882a593Smuzhiyun 	u32 val;
3479*4882a593Smuzhiyun 
3480*4882a593Smuzhiyun 	/* Ensure all doorbells and mailboxes have been received by the QM */
3481*4882a593Smuzhiyun 	ret = qm_check_req_recv(qm);
3482*4882a593Smuzhiyun 	if (ret)
3483*4882a593Smuzhiyun 		return ret;
3484*4882a593Smuzhiyun 
3485*4882a593Smuzhiyun 	if (qm->vfs_num) {
3486*4882a593Smuzhiyun 		ret = qm_set_vf_mse(qm, false);
3487*4882a593Smuzhiyun 		if (ret) {
3488*4882a593Smuzhiyun 			pci_err(pdev, "Failed to disable vf MSE bit.\n");
3489*4882a593Smuzhiyun 			return ret;
3490*4882a593Smuzhiyun 		}
3491*4882a593Smuzhiyun 	}
3492*4882a593Smuzhiyun 
3493*4882a593Smuzhiyun 	ret = qm_set_msi(qm, false);
3494*4882a593Smuzhiyun 	if (ret) {
3495*4882a593Smuzhiyun 		pci_err(pdev, "Failed to disable PEH MSI bit.\n");
3496*4882a593Smuzhiyun 		return ret;
3497*4882a593Smuzhiyun 	}
3498*4882a593Smuzhiyun 
3499*4882a593Smuzhiyun 	qm_dev_ecc_mbit_handle(qm);
3500*4882a593Smuzhiyun 
3501*4882a593Smuzhiyun 	/* OOO register set and check */
3502*4882a593Smuzhiyun 	writel(ACC_MASTER_GLOBAL_CTRL_SHUTDOWN,
3503*4882a593Smuzhiyun 	       qm->io_base + ACC_MASTER_GLOBAL_CTRL);
3504*4882a593Smuzhiyun 
3505*4882a593Smuzhiyun 	/* If bus lock, reset chip */
3506*4882a593Smuzhiyun 	ret = readl_relaxed_poll_timeout(qm->io_base + ACC_MASTER_TRANS_RETURN,
3507*4882a593Smuzhiyun 					 val,
3508*4882a593Smuzhiyun 					 (val == ACC_MASTER_TRANS_RETURN_RW),
3509*4882a593Smuzhiyun 					 POLL_PERIOD, POLL_TIMEOUT);
3510*4882a593Smuzhiyun 	if (ret) {
3511*4882a593Smuzhiyun 		pci_emerg(pdev, "Bus lock! Please reset system.\n");
3512*4882a593Smuzhiyun 		return ret;
3513*4882a593Smuzhiyun 	}
3514*4882a593Smuzhiyun 
3515*4882a593Smuzhiyun 	ret = qm_set_pf_mse(qm, false);
3516*4882a593Smuzhiyun 	if (ret) {
3517*4882a593Smuzhiyun 		pci_err(pdev, "Failed to disable pf MSE bit.\n");
3518*4882a593Smuzhiyun 		return ret;
3519*4882a593Smuzhiyun 	}
3520*4882a593Smuzhiyun 
3521*4882a593Smuzhiyun 	/* The reset related sub-control registers are not in PCI BAR */
3522*4882a593Smuzhiyun 	if (ACPI_HANDLE(&pdev->dev)) {
3523*4882a593Smuzhiyun 		unsigned long long value = 0;
3524*4882a593Smuzhiyun 		acpi_status s;
3525*4882a593Smuzhiyun 
3526*4882a593Smuzhiyun 		s = acpi_evaluate_integer(ACPI_HANDLE(&pdev->dev),
3527*4882a593Smuzhiyun 					  qm->err_ini->err_info.acpi_rst,
3528*4882a593Smuzhiyun 					  NULL, &value);
3529*4882a593Smuzhiyun 		if (ACPI_FAILURE(s)) {
3530*4882a593Smuzhiyun 			pci_err(pdev, "No controller reset method!\n");
3531*4882a593Smuzhiyun 			return -EIO;
3532*4882a593Smuzhiyun 		}
3533*4882a593Smuzhiyun 
3534*4882a593Smuzhiyun 		if (value) {
3535*4882a593Smuzhiyun 			pci_err(pdev, "Reset step %llu failed!\n", value);
3536*4882a593Smuzhiyun 			return -EIO;
3537*4882a593Smuzhiyun 		}
3538*4882a593Smuzhiyun 	} else {
3539*4882a593Smuzhiyun 		pci_err(pdev, "No reset method!\n");
3540*4882a593Smuzhiyun 		return -EINVAL;
3541*4882a593Smuzhiyun 	}
3542*4882a593Smuzhiyun 
3543*4882a593Smuzhiyun 	return 0;
3544*4882a593Smuzhiyun }
3545*4882a593Smuzhiyun 
3546*4882a593Smuzhiyun static int qm_vf_reset_done(struct hisi_qm *qm)
3547*4882a593Smuzhiyun {
3548*4882a593Smuzhiyun 	struct hisi_qm_list *qm_list = qm->qm_list;
3549*4882a593Smuzhiyun 	struct pci_dev *pdev = qm->pdev;
3550*4882a593Smuzhiyun 	struct pci_dev *virtfn;
3551*4882a593Smuzhiyun 	struct hisi_qm *vf_qm;
3552*4882a593Smuzhiyun 	int ret = 0;
3553*4882a593Smuzhiyun 
3554*4882a593Smuzhiyun 	mutex_lock(&qm_list->lock);
3555*4882a593Smuzhiyun 	list_for_each_entry(vf_qm, &qm_list->list, list) {
3556*4882a593Smuzhiyun 		virtfn = vf_qm->pdev;
3557*4882a593Smuzhiyun 		if (virtfn == pdev)
3558*4882a593Smuzhiyun 			continue;
3559*4882a593Smuzhiyun 
3560*4882a593Smuzhiyun 		if (pci_physfn(virtfn) == pdev) {
3561*4882a593Smuzhiyun 			/* enable the VF's PCIe BAR configuration */
3562*4882a593Smuzhiyun 			pci_restore_state(virtfn);
3563*4882a593Smuzhiyun 
3564*4882a593Smuzhiyun 			ret = qm_restart(vf_qm);
3565*4882a593Smuzhiyun 			if (ret)
3566*4882a593Smuzhiyun 				goto restart_fail;
3567*4882a593Smuzhiyun 		}
3568*4882a593Smuzhiyun 	}
3569*4882a593Smuzhiyun 
3570*4882a593Smuzhiyun restart_fail:
3571*4882a593Smuzhiyun 	mutex_unlock(&qm_list->lock);
3572*4882a593Smuzhiyun 	return ret;
3573*4882a593Smuzhiyun }
3574*4882a593Smuzhiyun 
3575*4882a593Smuzhiyun static int qm_get_dev_err_status(struct hisi_qm *qm)
3576*4882a593Smuzhiyun {
3577*4882a593Smuzhiyun 	return qm->err_ini->get_dev_hw_err_status(qm);
3578*4882a593Smuzhiyun }
3579*4882a593Smuzhiyun 
3580*4882a593Smuzhiyun static int qm_dev_hw_init(struct hisi_qm *qm)
3581*4882a593Smuzhiyun {
3582*4882a593Smuzhiyun 	return qm->err_ini->hw_init(qm);
3583*4882a593Smuzhiyun }
3584*4882a593Smuzhiyun 
3585*4882a593Smuzhiyun static void qm_restart_prepare(struct hisi_qm *qm)
3586*4882a593Smuzhiyun {
3587*4882a593Smuzhiyun 	u32 value;
3588*4882a593Smuzhiyun 
3589*4882a593Smuzhiyun 	if (!qm->err_status.is_qm_ecc_mbit &&
3590*4882a593Smuzhiyun 	    !qm->err_status.is_dev_ecc_mbit)
3591*4882a593Smuzhiyun 		return;
3592*4882a593Smuzhiyun 
3593*4882a593Smuzhiyun 	/* temporarily close the OOO port used for PEH to write out MSI */
3594*4882a593Smuzhiyun 	value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN);
3595*4882a593Smuzhiyun 	writel(value & ~qm->err_ini->err_info.msi_wr_port,
3596*4882a593Smuzhiyun 	       qm->io_base + ACC_AM_CFG_PORT_WR_EN);
3597*4882a593Smuzhiyun 
3598*4882a593Smuzhiyun 	/* clear dev ecc 2bit error source if present */
3599*4882a593Smuzhiyun 	value = qm_get_dev_err_status(qm) &
3600*4882a593Smuzhiyun 		qm->err_ini->err_info.ecc_2bits_mask;
3601*4882a593Smuzhiyun 	if (value && qm->err_ini->clear_dev_hw_err_status)
3602*4882a593Smuzhiyun 		qm->err_ini->clear_dev_hw_err_status(qm, value);
3603*4882a593Smuzhiyun 
3604*4882a593Smuzhiyun 	/* clear QM ecc mbit error source */
3605*4882a593Smuzhiyun 	writel(QM_ECC_MBIT, qm->io_base + QM_ABNORMAL_INT_SOURCE);
3606*4882a593Smuzhiyun 
3607*4882a593Smuzhiyun 	/* clear AM Reorder Buffer ecc mbit source */
3608*4882a593Smuzhiyun 	writel(ACC_ROB_ECC_ERR_MULTPL, qm->io_base + ACC_AM_ROB_ECC_INT_STS);
3609*4882a593Smuzhiyun 
3610*4882a593Smuzhiyun 	if (qm->err_ini->open_axi_master_ooo)
3611*4882a593Smuzhiyun 		qm->err_ini->open_axi_master_ooo(qm);
3612*4882a593Smuzhiyun }
3613*4882a593Smuzhiyun 
3614*4882a593Smuzhiyun static void qm_restart_done(struct hisi_qm *qm)
3615*4882a593Smuzhiyun {
3616*4882a593Smuzhiyun 	u32 value;
3617*4882a593Smuzhiyun 
3618*4882a593Smuzhiyun 	if (!qm->err_status.is_qm_ecc_mbit &&
3619*4882a593Smuzhiyun 	    !qm->err_status.is_dev_ecc_mbit)
3620*4882a593Smuzhiyun 		return;
3621*4882a593Smuzhiyun 
3622*4882a593Smuzhiyun 	/* open the OOO port for PEH to write out MSI */
3623*4882a593Smuzhiyun 	value = readl(qm->io_base + ACC_AM_CFG_PORT_WR_EN);
3624*4882a593Smuzhiyun 	value |= qm->err_ini->err_info.msi_wr_port;
3625*4882a593Smuzhiyun 	writel(value, qm->io_base + ACC_AM_CFG_PORT_WR_EN);
3626*4882a593Smuzhiyun 
3627*4882a593Smuzhiyun 	qm->err_status.is_qm_ecc_mbit = false;
3628*4882a593Smuzhiyun 	qm->err_status.is_dev_ecc_mbit = false;
3629*4882a593Smuzhiyun }
3630*4882a593Smuzhiyun 
3631*4882a593Smuzhiyun static int qm_controller_reset_done(struct hisi_qm *qm)
3632*4882a593Smuzhiyun {
3633*4882a593Smuzhiyun 	struct pci_dev *pdev = qm->pdev;
3634*4882a593Smuzhiyun 	int ret;
3635*4882a593Smuzhiyun 
3636*4882a593Smuzhiyun 	ret = qm_set_msi(qm, true);
3637*4882a593Smuzhiyun 	if (ret) {
3638*4882a593Smuzhiyun 		pci_err(pdev, "Failed to enable PEH MSI bit!\n");
3639*4882a593Smuzhiyun 		return ret;
3640*4882a593Smuzhiyun 	}
3641*4882a593Smuzhiyun 
3642*4882a593Smuzhiyun 	ret = qm_set_pf_mse(qm, true);
3643*4882a593Smuzhiyun 	if (ret) {
3644*4882a593Smuzhiyun 		pci_err(pdev, "Failed to enable pf MSE bit!\n");
3645*4882a593Smuzhiyun 		return ret;
3646*4882a593Smuzhiyun 	}
3647*4882a593Smuzhiyun 
3648*4882a593Smuzhiyun 	if (qm->vfs_num) {
3649*4882a593Smuzhiyun 		ret = qm_set_vf_mse(qm, true);
3650*4882a593Smuzhiyun 		if (ret) {
3651*4882a593Smuzhiyun 			pci_err(pdev, "Failed to enable vf MSE bit!\n");
3652*4882a593Smuzhiyun 			return ret;
3653*4882a593Smuzhiyun 		}
3654*4882a593Smuzhiyun 	}
3655*4882a593Smuzhiyun 
3656*4882a593Smuzhiyun 	ret = qm_dev_hw_init(qm);
3657*4882a593Smuzhiyun 	if (ret) {
3658*4882a593Smuzhiyun 		pci_err(pdev, "Failed to init device\n");
3659*4882a593Smuzhiyun 		return ret;
3660*4882a593Smuzhiyun 	}
3661*4882a593Smuzhiyun 
3662*4882a593Smuzhiyun 	qm_restart_prepare(qm);
3663*4882a593Smuzhiyun 
3664*4882a593Smuzhiyun 	ret = qm_restart(qm);
3665*4882a593Smuzhiyun 	if (ret) {
3666*4882a593Smuzhiyun 		pci_err(pdev, "Failed to start QM!\n");
3667*4882a593Smuzhiyun 		return ret;
3668*4882a593Smuzhiyun 	}
3669*4882a593Smuzhiyun 
3670*4882a593Smuzhiyun 	if (qm->vfs_num) {
3671*4882a593Smuzhiyun 		ret = qm_vf_q_assign(qm, qm->vfs_num);
3672*4882a593Smuzhiyun 		if (ret) {
3673*4882a593Smuzhiyun 			pci_err(pdev, "Failed to assign queue!\n");
3674*4882a593Smuzhiyun 			return ret;
3675*4882a593Smuzhiyun 		}
3676*4882a593Smuzhiyun 	}
3677*4882a593Smuzhiyun 
3678*4882a593Smuzhiyun 	ret = qm_vf_reset_done(qm);
3679*4882a593Smuzhiyun 	if (ret) {
3680*4882a593Smuzhiyun 		pci_err(pdev, "Failed to start VFs!\n");
3681*4882a593Smuzhiyun 		return -EPERM;
3682*4882a593Smuzhiyun 	}
3683*4882a593Smuzhiyun 
3684*4882a593Smuzhiyun 	hisi_qm_dev_err_init(qm);
3685*4882a593Smuzhiyun 	qm_restart_done(qm);
3686*4882a593Smuzhiyun 
3687*4882a593Smuzhiyun 	clear_bit(QM_DEV_RESET_FLAG, &qm->reset_flag);
3688*4882a593Smuzhiyun 
3689*4882a593Smuzhiyun 	return 0;
3690*4882a593Smuzhiyun }
3691*4882a593Smuzhiyun 
3692*4882a593Smuzhiyun static int qm_controller_reset(struct hisi_qm *qm)
3693*4882a593Smuzhiyun {
3694*4882a593Smuzhiyun 	struct pci_dev *pdev = qm->pdev;
3695*4882a593Smuzhiyun 	int ret;
3696*4882a593Smuzhiyun 
3697*4882a593Smuzhiyun 	pci_info(pdev, "Controller resetting...\n");
3698*4882a593Smuzhiyun 
3699*4882a593Smuzhiyun 	ret = qm_controller_reset_prepare(qm);
3700*4882a593Smuzhiyun 	if (ret)
3701*4882a593Smuzhiyun 		return ret;
3702*4882a593Smuzhiyun 
3703*4882a593Smuzhiyun 	ret = qm_soft_reset(qm);
3704*4882a593Smuzhiyun 	if (ret) {
3705*4882a593Smuzhiyun 		pci_err(pdev, "Controller reset failed (%d)\n", ret);
3706*4882a593Smuzhiyun 		return ret;
3707*4882a593Smuzhiyun 	}
3708*4882a593Smuzhiyun 
3709*4882a593Smuzhiyun 	ret = qm_controller_reset_done(qm);
3710*4882a593Smuzhiyun 	if (ret)
3711*4882a593Smuzhiyun 		return ret;
3712*4882a593Smuzhiyun 
3713*4882a593Smuzhiyun 	pci_info(pdev, "Controller reset complete\n");
3714*4882a593Smuzhiyun 
3715*4882a593Smuzhiyun 	return 0;
3716*4882a593Smuzhiyun }
3717*4882a593Smuzhiyun 
3718*4882a593Smuzhiyun /**
3719*4882a593Smuzhiyun  * hisi_qm_dev_slot_reset() - slot reset
3720*4882a593Smuzhiyun  * @pdev: the PCIe device
3721*4882a593Smuzhiyun  *
3722*4882a593Smuzhiyun  * This function offers the QM-related PCIe device reset interface. Drivers
3723*4882a593Smuzhiyun  * that use QM can use it as the slot_reset callback in their struct pci_error_handlers.
3724*4882a593Smuzhiyun  */
3725*4882a593Smuzhiyun pci_ers_result_t hisi_qm_dev_slot_reset(struct pci_dev *pdev)
3726*4882a593Smuzhiyun {
3727*4882a593Smuzhiyun 	struct hisi_qm *qm = pci_get_drvdata(pdev);
3728*4882a593Smuzhiyun 	int ret;
3729*4882a593Smuzhiyun 
3730*4882a593Smuzhiyun 	if (pdev->is_virtfn)
3731*4882a593Smuzhiyun 		return PCI_ERS_RESULT_RECOVERED;
3732*4882a593Smuzhiyun 
3733*4882a593Smuzhiyun 	pci_aer_clear_nonfatal_status(pdev);
3734*4882a593Smuzhiyun 
3735*4882a593Smuzhiyun 	/* reset pcie device controller */
3736*4882a593Smuzhiyun 	ret = qm_controller_reset(qm);
3737*4882a593Smuzhiyun 	if (ret) {
3738*4882a593Smuzhiyun 		pci_err(pdev, "Controller reset failed (%d)\n", ret);
3739*4882a593Smuzhiyun 		return PCI_ERS_RESULT_DISCONNECT;
3740*4882a593Smuzhiyun 	}
3741*4882a593Smuzhiyun 
3742*4882a593Smuzhiyun 	return PCI_ERS_RESULT_RECOVERED;
3743*4882a593Smuzhiyun }
3744*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(hisi_qm_dev_slot_reset);
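/*
 * Editor's note -- an illustrative sketch, not part of the upstream driver: an
 * accelerator driver built on this QM layer would typically plug the exported
 * reset helpers straight into its struct pci_error_handlers. Only callbacks
 * defined and exported in this file are referenced (hisi_qm_dev_slot_reset(),
 * hisi_qm_reset_prepare(), hisi_qm_reset_done()); the "example_" name is
 * hypothetical.
 */
#if 0	/* example only, never compiled */
static const struct pci_error_handlers example_err_handler = {
	.slot_reset	= hisi_qm_dev_slot_reset,
	.reset_prepare	= hisi_qm_reset_prepare,
	.reset_done	= hisi_qm_reset_done,
};
#endif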
3745*4882a593Smuzhiyun 
3746*4882a593Smuzhiyun /* check whether the interrupt was caused by an ecc-mbit error */
3747*4882a593Smuzhiyun static int qm_check_dev_error(struct hisi_qm *qm)
3748*4882a593Smuzhiyun {
3749*4882a593Smuzhiyun 	int ret;
3750*4882a593Smuzhiyun 
3751*4882a593Smuzhiyun 	if (qm->fun_type == QM_HW_VF)
3752*4882a593Smuzhiyun 		return 0;
3753*4882a593Smuzhiyun 
3754*4882a593Smuzhiyun 	ret = qm_get_hw_error_status(qm) & QM_ECC_MBIT;
3755*4882a593Smuzhiyun 	if (ret)
3756*4882a593Smuzhiyun 		return ret;
3757*4882a593Smuzhiyun 
3758*4882a593Smuzhiyun 	return (qm_get_dev_err_status(qm) &
3759*4882a593Smuzhiyun 		qm->err_ini->err_info.ecc_2bits_mask);
3760*4882a593Smuzhiyun }
3761*4882a593Smuzhiyun 
3762*4882a593Smuzhiyun void hisi_qm_reset_prepare(struct pci_dev *pdev)
3763*4882a593Smuzhiyun {
3764*4882a593Smuzhiyun 	struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
3765*4882a593Smuzhiyun 	struct hisi_qm *qm = pci_get_drvdata(pdev);
3766*4882a593Smuzhiyun 	u32 delay = 0;
3767*4882a593Smuzhiyun 	int ret;
3768*4882a593Smuzhiyun 
3769*4882a593Smuzhiyun 	hisi_qm_dev_err_uninit(pf_qm);
3770*4882a593Smuzhiyun 
3771*4882a593Smuzhiyun 	/*
3772*4882a593Smuzhiyun 	 * Check whether there is an ECC mbit error. If there is, wait for the
3773*4882a593Smuzhiyun 	 * soft reset to fix it before going on.
3774*4882a593Smuzhiyun 	 */
3775*4882a593Smuzhiyun 	while (qm_check_dev_error(pf_qm)) {
3776*4882a593Smuzhiyun 		msleep(++delay);
3777*4882a593Smuzhiyun 		if (delay > QM_RESET_WAIT_TIMEOUT)
3778*4882a593Smuzhiyun 			return;
3779*4882a593Smuzhiyun 	}
3780*4882a593Smuzhiyun 
3781*4882a593Smuzhiyun 	ret = qm_reset_prepare_ready(qm);
3782*4882a593Smuzhiyun 	if (ret) {
3783*4882a593Smuzhiyun 		pci_err(pdev, "FLR not ready!\n");
3784*4882a593Smuzhiyun 		return;
3785*4882a593Smuzhiyun 	}
3786*4882a593Smuzhiyun 
3787*4882a593Smuzhiyun 	if (qm->vfs_num) {
3788*4882a593Smuzhiyun 		ret = qm_vf_reset_prepare(qm, QM_FLR);
3789*4882a593Smuzhiyun 		if (ret) {
3790*4882a593Smuzhiyun 			pci_err(pdev, "Failed to prepare reset, ret = %d.\n",
3791*4882a593Smuzhiyun 				ret);
3792*4882a593Smuzhiyun 			return;
3793*4882a593Smuzhiyun 		}
3794*4882a593Smuzhiyun 	}
3795*4882a593Smuzhiyun 
3796*4882a593Smuzhiyun 	ret = hisi_qm_stop(qm, QM_FLR);
3797*4882a593Smuzhiyun 	if (ret) {
3798*4882a593Smuzhiyun 		pci_err(pdev, "Failed to stop QM, ret = %d.\n", ret);
3799*4882a593Smuzhiyun 		return;
3800*4882a593Smuzhiyun 	}
3801*4882a593Smuzhiyun 
3802*4882a593Smuzhiyun 	pci_info(pdev, "FLR resetting...\n");
3803*4882a593Smuzhiyun }
3804*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(hisi_qm_reset_prepare);
3805*4882a593Smuzhiyun 
3806*4882a593Smuzhiyun static bool qm_flr_reset_complete(struct pci_dev *pdev)
3807*4882a593Smuzhiyun {
3808*4882a593Smuzhiyun 	struct pci_dev *pf_pdev = pci_physfn(pdev);
3809*4882a593Smuzhiyun 	struct hisi_qm *qm = pci_get_drvdata(pf_pdev);
3810*4882a593Smuzhiyun 	u32 id;
3811*4882a593Smuzhiyun 
3812*4882a593Smuzhiyun 	pci_read_config_dword(qm->pdev, PCI_COMMAND, &id);
3813*4882a593Smuzhiyun 	if (id == QM_PCI_COMMAND_INVALID) {
3814*4882a593Smuzhiyun 		pci_err(pdev, "Device cannot be used!\n");
3815*4882a593Smuzhiyun 		return false;
3816*4882a593Smuzhiyun 	}
3817*4882a593Smuzhiyun 
3818*4882a593Smuzhiyun 	clear_bit(QM_DEV_RESET_FLAG, &qm->reset_flag);
3819*4882a593Smuzhiyun 
3820*4882a593Smuzhiyun 	return true;
3821*4882a593Smuzhiyun }
3822*4882a593Smuzhiyun 
3823*4882a593Smuzhiyun void hisi_qm_reset_done(struct pci_dev *pdev)
3824*4882a593Smuzhiyun {
3825*4882a593Smuzhiyun 	struct hisi_qm *pf_qm = pci_get_drvdata(pci_physfn(pdev));
3826*4882a593Smuzhiyun 	struct hisi_qm *qm = pci_get_drvdata(pdev);
3827*4882a593Smuzhiyun 	int ret;
3828*4882a593Smuzhiyun 
3829*4882a593Smuzhiyun 	hisi_qm_dev_err_init(pf_qm);
3830*4882a593Smuzhiyun 
3831*4882a593Smuzhiyun 	ret = qm_restart(qm);
3832*4882a593Smuzhiyun 	if (ret) {
3833*4882a593Smuzhiyun 		pci_err(pdev, "Failed to start QM, ret = %d.\n", ret);
3834*4882a593Smuzhiyun 		goto flr_done;
3835*4882a593Smuzhiyun 	}
3836*4882a593Smuzhiyun 
3837*4882a593Smuzhiyun 	if (qm->fun_type == QM_HW_PF) {
3838*4882a593Smuzhiyun 		ret = qm_dev_hw_init(qm);
3839*4882a593Smuzhiyun 		if (ret) {
3840*4882a593Smuzhiyun 			pci_err(pdev, "Failed to init PF, ret = %d.\n", ret);
3841*4882a593Smuzhiyun 			goto flr_done;
3842*4882a593Smuzhiyun 		}
3843*4882a593Smuzhiyun 
3844*4882a593Smuzhiyun 		if (!qm->vfs_num)
3845*4882a593Smuzhiyun 			goto flr_done;
3846*4882a593Smuzhiyun 
3847*4882a593Smuzhiyun 		ret = qm_vf_q_assign(qm, qm->vfs_num);
3848*4882a593Smuzhiyun 		if (ret) {
3849*4882a593Smuzhiyun 			pci_err(pdev, "Failed to assign VFs, ret = %d.\n", ret);
3850*4882a593Smuzhiyun 			goto flr_done;
3851*4882a593Smuzhiyun 		}
3852*4882a593Smuzhiyun 
3853*4882a593Smuzhiyun 		ret = qm_vf_reset_done(qm);
3854*4882a593Smuzhiyun 		if (ret) {
3855*4882a593Smuzhiyun 			pci_err(pdev, "Failed to start VFs, ret = %d.\n", ret);
3856*4882a593Smuzhiyun 			goto flr_done;
3857*4882a593Smuzhiyun 		}
3858*4882a593Smuzhiyun 	}
3859*4882a593Smuzhiyun 
3860*4882a593Smuzhiyun flr_done:
3861*4882a593Smuzhiyun 	if (qm_flr_reset_complete(pdev))
3862*4882a593Smuzhiyun 		pci_info(pdev, "FLR reset complete\n");
3863*4882a593Smuzhiyun }
3864*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(hisi_qm_reset_done);
3865*4882a593Smuzhiyun 
3866*4882a593Smuzhiyun static irqreturn_t qm_abnormal_irq(int irq, void *data)
3867*4882a593Smuzhiyun {
3868*4882a593Smuzhiyun 	struct hisi_qm *qm = data;
3869*4882a593Smuzhiyun 	enum acc_err_result ret;
3870*4882a593Smuzhiyun 
3871*4882a593Smuzhiyun 	atomic64_inc(&qm->debug.dfx.abnormal_irq_cnt);
3872*4882a593Smuzhiyun 	ret = qm_process_dev_error(qm);
3873*4882a593Smuzhiyun 	if (ret == ACC_ERR_NEED_RESET)
3874*4882a593Smuzhiyun 		schedule_work(&qm->rst_work);
3875*4882a593Smuzhiyun 
3876*4882a593Smuzhiyun 	return IRQ_HANDLED;
3877*4882a593Smuzhiyun }
3878*4882a593Smuzhiyun 
3879*4882a593Smuzhiyun static int qm_irq_register(struct hisi_qm *qm)
3880*4882a593Smuzhiyun {
3881*4882a593Smuzhiyun 	struct pci_dev *pdev = qm->pdev;
3882*4882a593Smuzhiyun 	int ret;
3883*4882a593Smuzhiyun 
3884*4882a593Smuzhiyun 	ret = request_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR),
3885*4882a593Smuzhiyun 			  qm_irq, IRQF_SHARED, qm->dev_name, qm);
3886*4882a593Smuzhiyun 	if (ret)
3887*4882a593Smuzhiyun 		return ret;
3888*4882a593Smuzhiyun 
3889*4882a593Smuzhiyun 	if (qm->ver != QM_HW_V1) {
3890*4882a593Smuzhiyun 		ret = request_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR),
3891*4882a593Smuzhiyun 				  qm_aeq_irq, IRQF_SHARED, qm->dev_name, qm);
3892*4882a593Smuzhiyun 		if (ret)
3893*4882a593Smuzhiyun 			goto err_aeq_irq;
3894*4882a593Smuzhiyun 
3895*4882a593Smuzhiyun 		if (qm->fun_type == QM_HW_PF) {
3896*4882a593Smuzhiyun 			ret = request_irq(pci_irq_vector(pdev,
3897*4882a593Smuzhiyun 					  QM_ABNORMAL_EVENT_IRQ_VECTOR),
3898*4882a593Smuzhiyun 					  qm_abnormal_irq, IRQF_SHARED,
3899*4882a593Smuzhiyun 					  qm->dev_name, qm);
3900*4882a593Smuzhiyun 			if (ret)
3901*4882a593Smuzhiyun 				goto err_abnormal_irq;
3902*4882a593Smuzhiyun 		}
3903*4882a593Smuzhiyun 	}
3904*4882a593Smuzhiyun 
3905*4882a593Smuzhiyun 	return 0;
3906*4882a593Smuzhiyun 
3907*4882a593Smuzhiyun err_abnormal_irq:
3908*4882a593Smuzhiyun 	free_irq(pci_irq_vector(pdev, QM_AEQ_EVENT_IRQ_VECTOR), qm);
3909*4882a593Smuzhiyun err_aeq_irq:
3910*4882a593Smuzhiyun 	free_irq(pci_irq_vector(pdev, QM_EQ_EVENT_IRQ_VECTOR), qm);
3911*4882a593Smuzhiyun 	return ret;
3912*4882a593Smuzhiyun }
3913*4882a593Smuzhiyun 
3914*4882a593Smuzhiyun /**
3915*4882a593Smuzhiyun  * hisi_qm_dev_shutdown() - Shutdown device.
3916*4882a593Smuzhiyun  * @pdev: The device to be shut down.
3917*4882a593Smuzhiyun  *
3918*4882a593Smuzhiyun  * This function stops the qm when the OS shuts down or reboots.
3919*4882a593Smuzhiyun  */
3920*4882a593Smuzhiyun void hisi_qm_dev_shutdown(struct pci_dev *pdev)
3921*4882a593Smuzhiyun {
3922*4882a593Smuzhiyun 	struct hisi_qm *qm = pci_get_drvdata(pdev);
3923*4882a593Smuzhiyun 	int ret;
3924*4882a593Smuzhiyun 
3925*4882a593Smuzhiyun 	ret = hisi_qm_stop(qm, QM_NORMAL);
3926*4882a593Smuzhiyun 	if (ret)
3927*4882a593Smuzhiyun 		dev_err(&pdev->dev, "Failed to stop qm in shutdown!\n");
3928*4882a593Smuzhiyun }
3929*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(hisi_qm_dev_shutdown);
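/*
 * Editor's note -- an illustrative sketch, not part of the upstream driver:
 * the shutdown helper above is meant to be wired in as the .shutdown callback
 * of the accelerator's struct pci_driver, so the qm is stopped cleanly on
 * reboot or power-off. All "example_" names are hypothetical (err_handler
 * points at the sketch shown earlier); .probe, .remove and .id_table are
 * omitted for brevity.
 */
#if 0	/* example only, never compiled */
static struct pci_driver example_pci_driver = {
	.name		= "example_acc",
	.err_handler	= &example_err_handler,
	.shutdown	= hisi_qm_dev_shutdown,
};
#endif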
3930*4882a593Smuzhiyun 
3931*4882a593Smuzhiyun static void hisi_qm_controller_reset(struct work_struct *rst_work)
3932*4882a593Smuzhiyun {
3933*4882a593Smuzhiyun 	struct hisi_qm *qm = container_of(rst_work, struct hisi_qm, rst_work);
3934*4882a593Smuzhiyun 	int ret;
3935*4882a593Smuzhiyun 
3936*4882a593Smuzhiyun 	/* reset pcie device controller */
3937*4882a593Smuzhiyun 	ret = qm_controller_reset(qm);
3938*4882a593Smuzhiyun 	if (ret)
3939*4882a593Smuzhiyun 		dev_err(&qm->pdev->dev, "controller reset failed (%d)\n", ret);
3940*4882a593Smuzhiyun 
3941*4882a593Smuzhiyun }
3942*4882a593Smuzhiyun 
3943*4882a593Smuzhiyun /**
3944*4882a593Smuzhiyun  * hisi_qm_alg_register() - Register alg to crypto and add qm to qm_list.
3945*4882a593Smuzhiyun  * @qm: The qm to be added.
3946*4882a593Smuzhiyun  * @qm_list: The qm list.
3947*4882a593Smuzhiyun  *
3948*4882a593Smuzhiyun  * This function adds the qm to the qm list, and registers the algorithm to
3949*4882a593Smuzhiyun  * crypto if the qm list was empty before the addition.
3950*4882a593Smuzhiyun  */
3951*4882a593Smuzhiyun int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
3952*4882a593Smuzhiyun {
3953*4882a593Smuzhiyun 	int flag = 0;
3954*4882a593Smuzhiyun 	int ret = 0;
3955*4882a593Smuzhiyun 
3956*4882a593Smuzhiyun 	mutex_lock(&qm_list->lock);
3957*4882a593Smuzhiyun 	if (list_empty(&qm_list->list))
3958*4882a593Smuzhiyun 		flag = 1;
3959*4882a593Smuzhiyun 	list_add_tail(&qm->list, &qm_list->list);
3960*4882a593Smuzhiyun 	mutex_unlock(&qm_list->lock);
3961*4882a593Smuzhiyun 
3962*4882a593Smuzhiyun 	if (flag) {
3963*4882a593Smuzhiyun 		ret = qm_list->register_to_crypto();
3964*4882a593Smuzhiyun 		if (ret) {
3965*4882a593Smuzhiyun 			mutex_lock(&qm_list->lock);
3966*4882a593Smuzhiyun 			list_del(&qm->list);
3967*4882a593Smuzhiyun 			mutex_unlock(&qm_list->lock);
3968*4882a593Smuzhiyun 		}
3969*4882a593Smuzhiyun 	}
3970*4882a593Smuzhiyun 
3971*4882a593Smuzhiyun 	return ret;
3972*4882a593Smuzhiyun }
3973*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(hisi_qm_alg_register);
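/*
 * Editor's note -- an illustrative sketch, not part of the upstream driver:
 * hisi_qm_alg_register() is normally called from a driver's probe path once
 * the qm is up; only the first qm added to an (already initialized) qm_list
 * triggers the list's register_to_crypto() callback. The "example_" names and
 * callbacks are hypothetical, and initialization of the qm_list's list head
 * and lock is assumed to happen at module init.
 */
#if 0	/* example only, never compiled */
static int example_register_to_crypto(void)
{
	/* register the accelerator's algorithms with the crypto API here */
	return 0;
}

static void example_unregister_from_crypto(void)
{
	/* unregister the algorithms registered above */
}

static struct hisi_qm_list example_devices = {
	.register_to_crypto	= example_register_to_crypto,
	.unregister_from_crypto	= example_unregister_from_crypto,
};

static int example_probe_tail(struct hisi_qm *qm)
{
	/* the first device on the list registers the algorithms to crypto */
	return hisi_qm_alg_register(qm, &example_devices);
}
#endif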
3974*4882a593Smuzhiyun 
3975*4882a593Smuzhiyun /**
3976*4882a593Smuzhiyun  * hisi_qm_alg_unregister() - Unregister alg from crypto and delete qm from
3977*4882a593Smuzhiyun  * qm list.
3978*4882a593Smuzhiyun  * @qm: The qm to be deleted.
3979*4882a593Smuzhiyun  * @qm_list: The qm list.
3980*4882a593Smuzhiyun  *
3981*4882a593Smuzhiyun  * This function deletes the qm from the qm list, and unregisters the algorithm
3982*4882a593Smuzhiyun  * from crypto when the qm list becomes empty.
3983*4882a593Smuzhiyun  */
3984*4882a593Smuzhiyun void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
3985*4882a593Smuzhiyun {
3986*4882a593Smuzhiyun 	mutex_lock(&qm_list->lock);
3987*4882a593Smuzhiyun 	list_del(&qm->list);
3988*4882a593Smuzhiyun 	mutex_unlock(&qm_list->lock);
3989*4882a593Smuzhiyun 
3990*4882a593Smuzhiyun 	if (list_empty(&qm_list->list))
3991*4882a593Smuzhiyun 		qm_list->unregister_from_crypto();
3992*4882a593Smuzhiyun }
3993*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(hisi_qm_alg_unregister);
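/*
 * Editor's note -- an illustrative sketch, not part of the upstream driver:
 * the matching remove path simply drops the qm from the same list; when the
 * last device leaves, the list's unregister_from_crypto() callback runs. The
 * names are the hypothetical ones introduced in the sketch above.
 */
#if 0	/* example only, never compiled */
static void example_remove_head(struct hisi_qm *qm)
{
	/* the last device leaving the list unregisters the algorithms */
	hisi_qm_alg_unregister(qm, &example_devices);
}
#endif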
3994*4882a593Smuzhiyun 
3995*4882a593Smuzhiyun /**
3996*4882a593Smuzhiyun  * hisi_qm_init() - Initialize configures about qm.
3997*4882a593Smuzhiyun  * @qm: The qm to be initialized.
3998*4882a593Smuzhiyun  *
3999*4882a593Smuzhiyun  * This function initializes the qm; hisi_qm_start() can then be called to put it to work.
4000*4882a593Smuzhiyun  */
4001*4882a593Smuzhiyun int hisi_qm_init(struct hisi_qm *qm)
4002*4882a593Smuzhiyun {
4003*4882a593Smuzhiyun 	struct pci_dev *pdev = qm->pdev;
4004*4882a593Smuzhiyun 	struct device *dev = &pdev->dev;
4005*4882a593Smuzhiyun 	unsigned int num_vec;
4006*4882a593Smuzhiyun 	int ret;
4007*4882a593Smuzhiyun 
4008*4882a593Smuzhiyun 	hisi_qm_pre_init(qm);
4009*4882a593Smuzhiyun 
4010*4882a593Smuzhiyun 	ret = qm_alloc_uacce(qm);
4011*4882a593Smuzhiyun 	if (ret < 0)
4012*4882a593Smuzhiyun 		dev_warn(&pdev->dev, "failed to alloc uacce (%d)\n", ret);
4013*4882a593Smuzhiyun 
4014*4882a593Smuzhiyun 	ret = pci_enable_device_mem(pdev);
4015*4882a593Smuzhiyun 	if (ret < 0) {
4016*4882a593Smuzhiyun 		dev_err(&pdev->dev, "Failed to enable device mem!\n");
4017*4882a593Smuzhiyun 		goto err_remove_uacce;
4018*4882a593Smuzhiyun 	}
4019*4882a593Smuzhiyun 
4020*4882a593Smuzhiyun 	ret = pci_request_mem_regions(pdev, qm->dev_name);
4021*4882a593Smuzhiyun 	if (ret < 0) {
4022*4882a593Smuzhiyun 		dev_err(&pdev->dev, "Failed to request mem regions!\n");
4023*4882a593Smuzhiyun 		goto err_disable_pcidev;
4024*4882a593Smuzhiyun 	}
4025*4882a593Smuzhiyun 
4026*4882a593Smuzhiyun 	qm->phys_base = pci_resource_start(pdev, PCI_BAR_2);
4027*4882a593Smuzhiyun 	qm->phys_size = pci_resource_len(qm->pdev, PCI_BAR_2);
4028*4882a593Smuzhiyun 	qm->io_base = ioremap(qm->phys_base, qm->phys_size);
4029*4882a593Smuzhiyun 	if (!qm->io_base) {
4030*4882a593Smuzhiyun 		ret = -EIO;
4031*4882a593Smuzhiyun 		goto err_release_mem_regions;
4032*4882a593Smuzhiyun 	}
4033*4882a593Smuzhiyun 
4034*4882a593Smuzhiyun 	ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64));
4035*4882a593Smuzhiyun 	if (ret < 0)
4036*4882a593Smuzhiyun 		goto err_iounmap;
4037*4882a593Smuzhiyun 	pci_set_master(pdev);
4038*4882a593Smuzhiyun 
4039*4882a593Smuzhiyun 	if (!qm->ops->get_irq_num) {
4040*4882a593Smuzhiyun 		ret = -EOPNOTSUPP;
4041*4882a593Smuzhiyun 		goto err_iounmap;
4042*4882a593Smuzhiyun 	}
4043*4882a593Smuzhiyun 	num_vec = qm->ops->get_irq_num(qm);
4044*4882a593Smuzhiyun 	ret = pci_alloc_irq_vectors(pdev, num_vec, num_vec, PCI_IRQ_MSI);
4045*4882a593Smuzhiyun 	if (ret < 0) {
4046*4882a593Smuzhiyun 		dev_err(dev, "Failed to enable MSI vectors!\n");
4047*4882a593Smuzhiyun 		goto err_iounmap;
4048*4882a593Smuzhiyun 	}
4049*4882a593Smuzhiyun 
4050*4882a593Smuzhiyun 	ret = qm_irq_register(qm);
4051*4882a593Smuzhiyun 	if (ret)
4052*4882a593Smuzhiyun 		goto err_free_irq_vectors;
4053*4882a593Smuzhiyun 
4054*4882a593Smuzhiyun 	if (qm->fun_type == QM_HW_VF && qm->ver != QM_HW_V1) {
4055*4882a593Smuzhiyun 		/* v2 starts to support getting the vft by mailbox */
4056*4882a593Smuzhiyun 		ret = hisi_qm_get_vft(qm, &qm->qp_base, &qm->qp_num);
4057*4882a593Smuzhiyun 		if (ret)
4058*4882a593Smuzhiyun 			goto err_irq_unregister;
4059*4882a593Smuzhiyun 	}
4060*4882a593Smuzhiyun 
4061*4882a593Smuzhiyun 	ret = hisi_qm_memory_init(qm);
4062*4882a593Smuzhiyun 	if (ret)
4063*4882a593Smuzhiyun 		goto err_irq_unregister;
4064*4882a593Smuzhiyun 
4065*4882a593Smuzhiyun 	INIT_WORK(&qm->work, qm_work_process);
4066*4882a593Smuzhiyun 	if (qm->fun_type == QM_HW_PF)
4067*4882a593Smuzhiyun 		INIT_WORK(&qm->rst_work, hisi_qm_controller_reset);
4068*4882a593Smuzhiyun 
4069*4882a593Smuzhiyun 	atomic_set(&qm->status.flags, QM_INIT);
4070*4882a593Smuzhiyun 
4071*4882a593Smuzhiyun 	return 0;
4072*4882a593Smuzhiyun 
4073*4882a593Smuzhiyun err_irq_unregister:
4074*4882a593Smuzhiyun 	qm_irq_unregister(qm);
4075*4882a593Smuzhiyun err_free_irq_vectors:
4076*4882a593Smuzhiyun 	pci_free_irq_vectors(pdev);
4077*4882a593Smuzhiyun err_iounmap:
4078*4882a593Smuzhiyun 	iounmap(qm->io_base);
4079*4882a593Smuzhiyun err_release_mem_regions:
4080*4882a593Smuzhiyun 	pci_release_mem_regions(pdev);
4081*4882a593Smuzhiyun err_disable_pcidev:
4082*4882a593Smuzhiyun 	pci_disable_device(pdev);
4083*4882a593Smuzhiyun err_remove_uacce:
4084*4882a593Smuzhiyun 	uacce_remove(qm->uacce);
4085*4882a593Smuzhiyun 	qm->uacce = NULL;
4086*4882a593Smuzhiyun 	return ret;
4087*4882a593Smuzhiyun }
4088*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(hisi_qm_init);
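/*
 * Editor's note -- an illustrative sketch, not part of the upstream driver: a
 * minimal probe sequence built on this API fills in the qm fields the hardware
 * driver owns (pdev, dev_name, fun_type, ver, err_ini, ...), then calls
 * hisi_qm_init() followed by hisi_qm_start(). hisi_qm_start() and
 * hisi_qm_uninit() are assumed to be the counterparts exported elsewhere in
 * this file; the "example_" names are hypothetical.
 */
#if 0	/* example only, never compiled */
static int example_probe(struct pci_dev *pdev, const struct pci_device_id *id)
{
	struct hisi_qm *qm;
	int ret;

	qm = devm_kzalloc(&pdev->dev, sizeof(*qm), GFP_KERNEL);
	if (!qm)
		return -ENOMEM;

	qm->pdev = pdev;
	/* the driver also sets dev_name, fun_type, ver, err_ini, ... here */

	ret = hisi_qm_init(qm);
	if (ret)
		return ret;

	ret = hisi_qm_start(qm);
	if (ret)
		hisi_qm_uninit(qm);

	return ret;
}
#endif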
4089*4882a593Smuzhiyun 
4090*4882a593Smuzhiyun 
4091*4882a593Smuzhiyun MODULE_LICENSE("GPL v2");
4092*4882a593Smuzhiyun MODULE_AUTHOR("Zhou Wang <wangzhou1@hisilicon.com>");
4093*4882a593Smuzhiyun MODULE_DESCRIPTION("HiSilicon Accelerator queue manager driver");
4094